51CTO Zhao Qiang Hadoop Study Notes (7)
2017-12-14
lehuai
Using ZooKeeper to Implement Hadoop HA
This walkthrough uses ZooKeeper to provide Hadoop's high-availability (HA) feature.
[image: configuration overview]
Link: https://pan.baidu.com/s/1geUjpSn  Password: 7yoc
1. Install the JDK (on all nodes)
# mkdir tools
# mkdir training
# cd tools
# tar -zxvf jdk-8u144-linux-x64.tar.gz -C ~/training/
# cd ~/training/
# vi ~/.bash_profile
JAVA_HOME=/root/training/jdk1.8.0_144
export JAVA_HOME
PATH=$JAVA_HOME/bin:$PATH
export PATH
# source ~/.bash_profile
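Before moving on to /etc/hosts, it is worth confirming the JDK is actually on the PATH; the check below should print the binary under /root/training and report version 1.8.0_144 on every node:
# which java
/root/training/jdk1.8.0_144/bin/java
# java -version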
# vi /etc/hosts
192.168.56.11 hadoop11
192.168.56.12 hadoop12
192.168.56.13 hadoop13
192.168.56.21 hadoop21
192.168.56.22 hadoop22
192.168.56.23 hadoop23
192.168.56.24 hadoop24
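Once /etc/hosts is in place on every machine, name resolution can be spot-checked in one loop (the seven hostnames are the ones defined above):
# for h in hadoop11 hadoop12 hadoop13 hadoop21 hadoop22 hadoop23 hadoop24; do ping -c 1 $h; done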
2. Set up passwordless SSH among all seven nodes
hadoop11:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
hadoop12:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
hadoop13:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
hadoop21:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
hadoop22:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
hadoop23:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
hadoop24:
# cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
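With all keys distributed, every host should now reach every other host without a password prompt. A quick check from any one node; each iteration should print the remote hostname and never ask for a password:
# for h in hadoop11 hadoop12 hadoop13 hadoop21 hadoop22 hadoop23 hadoop24; do ssh $h hostname; done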
3. Install and configure ZooKeeper (hadoop11, hadoop12, hadoop13)
hadoop11:
# pwd
/root
# cd tools
# tar -zxvf zookeeper-3.4.6.tar.gz -C ~/training
# cd ~/training
# cd zookeeper-3.4.6
# vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME
PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
# source ~/.bash_profile
# mkdir data
# cd conf
# cp zoo_sample.cfg zoo.cfg
# vi zoo.cfg
dataDir=/root/training/zookeeper-3.4.6/data
server.1=hadoop11:2888:3888
server.2=hadoop12:2888:3888
server.3=hadoop13:2888:3888
# cd ../data
# echo 1 > myid
# cd ../..
# scp -r zookeeper-3.4.6/ root@hadoop12:/root/training/
# scp -r zookeeper-3.4.6/ root@hadoop13:/root/training/
hadoop12:
# cd training/zookeeper-3.4.6/data/
# echo 2 > myid
# cd ..
# vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME
PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
# source ~/.bash_profile
hadoop13:
# cd training/zookeeper-3.4.6/data/
# echo 3 > myid
# cd ..
# vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME
PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
# source ~/.bash_profile
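Each server.N line in zoo.cfg corresponds to the myid file on that host: ZooKeeper reads myid at startup to find its own server.N entry (2888 is the quorum port, 3888 the leader-election port). Since passwordless SSH is already in place, the three myid files can be verified from hadoop11 in one loop; the expected output is 1, 2, 3:
# for h in hadoop11 hadoop12 hadoop13; do ssh $h cat /root/training/zookeeper-3.4.6/data/myid; done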
4. Install and configure Hadoop (hadoop21, hadoop22, hadoop23, hadoop24)
hadoop21:
# cd tools
# tar -zxvf hadoop-2.4.1.tar.gz -C ~/training/
# cd ~/training
# cd hadoop-2.4.1/
# vi ~/.bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME
PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
# source ~/.bash_profile
# cd etc/hadoop/
# vi hadoop-env.sh
export JAVA_HOME=/root/training/jdk1.8.0_144
# mkdir ~/training/hadoop-2.4.1/tmp
# vi core-site.xml
<configuration>
<!-- Set the HDFS nameservice to ns1 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- Hadoop temporary directory -->
<property>
<name>hadoop.tmp.dir</name>
<value>/root/training/hadoop-2.4.1/tmp</value>
</property>
<!-- ZooKeeper quorum addresses -->
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop11:2181,hadoop12:2181,hadoop13:2181</value>
</property>
</configuration>
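With fs.defaultFS set to hdfs://ns1, clients address the logical nameservice instead of a particular NameNode; the failover proxy provider configured in hdfs-site.xml below resolves ns1 to whichever NameNode is currently active. Once the cluster is up, both commands below should therefore list the same root directory:
# hdfs dfs -ls /
# hdfs dfs -ls hdfs://ns1/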
# vi hdfs-site.xml
<configuration>
<!-- HDFS nameservice; must match fs.defaultFS in core-site.xml -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- ns1 has two NameNodes: nn1 and nn2 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>hadoop21:9000</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>hadoop21:50070</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>hadoop24:9000</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>hadoop24:50070</value>
</property>
<!-- Where the NameNode edit log is stored on the JournalNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://hadoop21:8485;hadoop24:8485/ns1</value>
</property>
<!-- Where each JournalNode stores its data on local disk -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/root/training/hadoop-2.4.1/journal</value>
</property>
<!-- Enable automatic NameNode failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Proxy provider that clients use to find the active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing methods, one per line; sshfence is tried first, and shell(/bin/true)
     acts as a fallback so failover can still proceed when the failed node is unreachable -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<!-- sshfence needs passwordless SSH; point it at the private key -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- sshfence connect timeout in milliseconds -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
</configuration>
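After saving hdfs-site.xml, the HA wiring can be read back with hdfs getconf, which is a quick way to catch typos in the nameservice keys; the values printed should match the configuration above:
# hdfs getconf -confKey dfs.nameservices
ns1
# hdfs getconf -confKey dfs.ha.namenodes.ns1
nn1,nn2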
# cp mapred-site.xml.template mapred-site.xml
# vi mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
# vi yarn-site.xml
<configuration>
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Cluster ID for the RM pair -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yrc</value>
</property>
<!-- Logical IDs of the two ResourceManagers -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<!-- Host of each ResourceManager -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop21</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop24</value>
</property>
<!-- ZooKeeper ensemble used by the ResourceManagers -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>hadoop11:2181,hadoop12:2181,hadoop13:2181</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
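Once the whole cluster is running (see the final section), the state of the two ResourceManagers can be queried with yarn rmadmin; one should report active and the other standby:
# yarn rmadmin -getServiceState rm1
active
# yarn rmadmin -getServiceState rm2
standby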
The slaves file lists the worker nodes that run the DataNode and NodeManager daemons:
# vi slaves
hadoop22
hadoop23
# cd ../../..
# scp -r hadoop-2.4.1/ root@hadoop22:/root/training/
# scp -r hadoop-2.4.1/ root@hadoop23:/root/training/
# scp -r hadoop-2.4.1/ root@hadoop24:/root/training/
hadoop22:
# vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME
PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
# source .bash_profile
hadoop23:
# vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME
PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
# source .bash_profile
hadoop24:
# vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME
PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
# source .bash_profile
5. Start the cluster
Start the ZooKeeper ensemble on each of hadoop11, hadoop12, and hadoop13:
# zkServer.sh start
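Once zkServer.sh start has been run on all three nodes, confirm the ensemble has formed a quorum by checking the status on each; one server should report Mode: leader and the other two Mode: follower:
# zkServer.sh status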
hadoop21:
Start the JournalNodes first (run this on both hadoop21 and hadoop24), then format HDFS on the first NameNode:
# hadoop-daemon.sh start journalnode
# hdfs namenode -format
# pwd
/root/training
# scp -r ~/training/hadoop-2.4.1/tmp root@hadoop24:/root/training/hadoop-2.4.1/
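Copying the tmp directory hands the standby NameNode on hadoop24 an exact copy of the freshly formatted metadata. The built-in bootstrap command achieves the same thing and is the more common approach; if preferred, run it on hadoop24 instead of the scp above:
# hdfs namenode -bootstrapStandby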
# hdfs zkfc -formatZK
# start-all.sh
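After start-all.sh returns, HA can be verified from hadoop21; nn1 and nn2 are the NameNode IDs defined in hdfs-site.xml, and one should report active while the other reports standby:
# hdfs haadmin -getServiceState nn1
active
# hdfs haadmin -getServiceState nn2
standby
Note that start-all.sh only launches a ResourceManager on the node where it is invoked; the second ResourceManager usually has to be started by hand on hadoop24:
# yarn-daemon.sh start resourcemanager
Killing the active NameNode process should then cause the standby to take over within seconds, which is exactly the automatic failover that the ZKFC (DFSZKFailoverController) provides.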