我爱编程

Hadoop集群的搭建

2017-07-27  本文已影响0人  pamperxg

useradd hadoop
passwd hadoop
vi /etc/sudoers
yyp (复制粘贴一行)
scp /etc/sudoers mini2:/etc/

uri:统一资源标识符(URI,Uniform Resource Identifier;URL 统一资源定位符是它的一种)
hdfs://mini1:9000(协议,namenode)
jdbc:mysql://localhost:3306

  1. hadoop-env.sh
    配置$JAVA_HOME
  2. core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mini1:9000</value>
</property>   (指定文件系统用hdfs,namenode:mini1,端口9000)
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/hdpdata</value>
</property>    (集群上的机器的进程工作的数据目录)
</configuration>
  3. hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
</configuration>
  4. mapred-site.xml.template
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>(mapreduce程序提交完后交给yarn)

mv mapred-site.xml.template mapred-site.xml

  5. yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>mini1</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
pwd
sudo vi /etc/profile
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.4
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

再scp到其他mini2,3,4

sudo scp /etc/profile mini4:/etc/
source /etc/profile
hadoop-daemon.sh start namenode
hadoop-daemon.sh start datanode
jps(查看java进程)

http://mini1:50070(jetty) 查看节点状态(namenode 的 web 界面)

start-dfs.sh
    stop-dfs.sh
start-yarn.sh
    stop-yarn.sh
ssh-keygen
ssh-copy-id 其他机器主机名
上一篇下一篇

猜你喜欢

热点阅读