Hadoop HA Configuration

2019-11-22 · 灬蜂蜜柚子
  1. Machine plan

     | Hostname | IP            | Software  | Processes                                          |
     |----------|---------------|-----------|----------------------------------------------------|
     | master01 | 192.168.5.100 |           | NameNode, ResourceManager, DFSZKFailoverController |
     | master02 | 192.168.5.101 |           | NameNode, ResourceManager, DFSZKFailoverController |
     | slave01  | 192.168.5.102 | zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
     | slave02  | 192.168.5.103 | zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
     | slave03  | 192.168.5.104 | zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
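
     Every node must be able to resolve all five hostnames. A minimal sketch of the matching /etc/hosts entries (assuming no DNS is in place), to be appended on every machine:

         192.168.5.100 master01
         192.168.5.101 master02
         192.168.5.102 slave01
         192.168.5.103 slave02
         192.168.5.104 slave03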
  2. Configure core-site.xml

         $ vim etc/hadoop/core-site.xml
    
         <configuration>

           <!-- HA: set the default filesystem to the nameservice "ns" -->
           <property>
             <name>fs.defaultFS</name>
             <value>hdfs://ns</value>
           </property>

           <!-- HA: the ZooKeeper quorum address -->
           <property>
             <name>ha.zookeeper.quorum</name>
             <value>slave01:2181,slave02:2181,slave03:2181</value>
           </property>

           <!-- Base directory for Hadoop working data -->
           <property>
             <name>hadoop.tmp.dir</name>
             <value>/export/data/hadoop/temp</value>
           </property>

           <!-- Allow the root proxy user from any host and group -->
           <property>
             <name>hadoop.proxyuser.root.hosts</name>
             <value>*</value>
           </property>

           <property>
             <name>hadoop.proxyuser.root.groups</name>
             <value>*</value>
           </property>

         </configuration>
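
     As a quick sanity check that the file is being read, `hdfs getconf` can echo a single key back; it should print the nameservice:

         $ hdfs getconf -confKey fs.defaultFS
         hdfs://ns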
    
  3. Configure hdfs-site.xml

       $ vim etc/hadoop/hdfs-site.xml
    
       <configuration>

         <!-- The HDFS nameservice "ns"; must match fs.defaultFS in core-site.xml -->
         <property>
           <name>dfs.nameservices</name>
           <value>ns</value>
         </property>

         <!-- The nameservice ns is backed by two NameNodes, nn1 and nn2 -->
         <property>
           <name>dfs.ha.namenodes.ns</name>
           <value>nn1,nn2</value>
         </property>

         <!-- RPC address of nn1 -->
         <property>
           <name>dfs.namenode.rpc-address.ns.nn1</name>
           <value>master01:9000</value>
         </property>

         <!-- HTTP address of nn1 -->
         <property>
           <name>dfs.namenode.http-address.ns.nn1</name>
           <value>master01:50070</value>
         </property>

         <!-- RPC address of nn2 -->
         <property>
           <name>dfs.namenode.rpc-address.ns.nn2</name>
           <value>master02:9000</value>
         </property>

         <!-- HTTP address of nn2 -->
         <property>
           <name>dfs.namenode.http-address.ns.nn2</name>
           <value>master02:50070</value>
         </property>

         <!-- Where the NameNode edit log is written on the JournalNodes -->
         <property>
           <name>dfs.namenode.shared.edits.dir</name>
           <value>qjournal://slave01:8485;slave02:8485;slave03:8485/ns</value>
         </property>

         <!-- Local disk path where each JournalNode stores its data -->
         <property>
           <name>dfs.journalnode.edits.dir</name>
           <value>/export/data/hadoop/journaldata</value>
         </property>

         <!-- Enable automatic failover between the NameNodes -->
         <property>
           <name>dfs.ha.automatic-failover.enabled</name>
           <value>true</value>
         </property>

         <!-- Proxy provider clients use to locate the active NameNode -->
         <property>
           <name>dfs.client.failover.proxy.provider.ns</name>
           <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
         </property>

         <!-- Fencing methods, separated by newlines, one per line -->
         <property>
           <name>dfs.ha.fencing.methods</name>
           <value>
             sshfence
             shell(/bin/true)
           </value>
         </property>

         <!-- sshfence requires passwordless SSH; the private key to use -->
         <property>
           <name>dfs.ha.fencing.ssh.private-key-files</name>
           <value>/root/.ssh/id_rsa</value>
         </property>

         <!-- Connect timeout for sshfence, in milliseconds -->
         <property>
           <name>dfs.ha.fencing.ssh.connect-timeout</name>
           <value>30000</value>
         </property>

         <property>
           <name>dfs.ha.namenodes.jn</name>
           <value>master01,master02</value>
         </property>

       </configuration>
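
     Note that sshfence only works if each NameNode host can SSH to the other as root without a password, using the key configured above. A minimal sketch of that one-time setup, run on both master01 and master02:

         $ ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa   # skip if the key already exists
         $ ssh-copy-id root@master01
         $ ssh-copy-id root@master02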
    
  4. Configure mapred-site.xml

         $ vim etc/hadoop/mapred-site.xml
    
          <configuration>

          <!-- Run MapReduce jobs on YARN -->
          <property>
            <name>mapreduce.framework.name</name>
            <value>yarn</value>
          </property>

          <!-- Hadoop 3.x: tell MapReduce tasks where Hadoop is installed -->
          <property>
            <name>yarn.app.mapreduce.am.env</name>
            <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
          </property>

          <property>
            <name>mapreduce.map.env</name>
            <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
          </property>

          <property>
            <name>mapreduce.reduce.env</name>
            <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
          </property>

          </configuration>
    
  5. Configure yarn-site.xml

         $ vim etc/hadoop/yarn-site.xml
    
          <configuration>
          <!-- Site specific YARN configuration properties -->

          <!-- Enable ResourceManager HA -->
          <property>
            <name>yarn.resourcemanager.ha.enabled</name>
            <value>true</value>
          </property>

          <!-- Cluster id for the ResourceManager pair -->
          <property>
            <name>yarn.resourcemanager.cluster-id</name>
            <value>yarn-ha</value>
          </property>

          <!-- Logical ids of the two ResourceManagers -->
          <property>
            <name>yarn.resourcemanager.ha.rm-ids</name>
            <value>rm1,rm2</value>
          </property>

          <!-- Hostname of each ResourceManager -->
          <property>
            <name>yarn.resourcemanager.hostname.rm1</name>
            <value>master01</value>
          </property>

          <property>
            <name>yarn.resourcemanager.hostname.rm2</name>
            <value>master02</value>
          </property>

          <!-- ZooKeeper quorum used for RM leader election -->
          <property>
            <name>yarn.resourcemanager.zk-address</name>
            <value>slave01:2181,slave02:2181,slave03:2181</value>
          </property>

          <!-- Shuffle service required by MapReduce -->
          <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
          </property>

          </configuration>
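
     All four files must be identical on every node. A sketch of pushing them out from master01, assuming $HADOOP_HOME points at the same path on all machines:

         $ for host in master02 slave01 slave02 slave03; do
         >   scp $HADOOP_HOME/etc/hadoop/*-site.xml $host:$HADOOP_HOME/etc/hadoop/
         > done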
    
  6. Start the ZooKeeper ensemble (run on each of the three ZooKeeper machines)

        cd $ZOOKEEPER_HOME/bin/
        ./zkServer.sh start
    
  7. Check the ZooKeeper status

         cd $ZOOKEEPER_HOME/bin/
         ./zkServer.sh status
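
     In a healthy three-node ensemble, one server reports itself as leader and the other two as followers:

         $ ./zkServer.sh status
         ...
         Mode: follower    # one of the three prints "Mode: leader"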
    
  8. Start the JournalNode cluster (run on slave01, slave02, and slave03)

         hdfs --daemon start journalnode
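
     On each slave, `jps` should now list JournalNode next to ZooKeeper's QuorumPeerMain (the PIDs below are illustrative):

         $ jps
         2345 QuorumPeerMain
         2678 JournalNode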
    
  9. Format the HA znode in ZooKeeper

         hdfs zkfc -formatZK
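
     This creates the znode under which the failover controllers coordinate (by default /hadoop-ha/<nameservice>). It can be verified from any ZooKeeper node with zkCli.sh:

         $ $ZOOKEEPER_HOME/bin/zkCli.sh -server slave01:2181
         [zk: slave01:2181(CONNECTED) 0] ls /hadoop-ha
         [ns]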
    
  10. Format the NameNode (run on master01)

         hdfs namenode -format
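
     Format only once; reformatting generates a new clusterID that no longer matches the DataNodes. Because dfs.namenode.name.dir is not set explicitly, the metadata lands under hadoop.tmp.dir and can be inspected there:

         $ cat /export/data/hadoop/temp/dfs/name/current/VERSION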
    
  11. Start the newly formatted NameNode (run on master01)

         hdfs --daemon start namenode
    
  12. Sync NameNode1's metadata to NameNode2 (run on master02)

         hdfs namenode -bootstrapStandby
    
  13. Start NameNode2 (run on master02)

         hdfs --daemon start namenode
    
  14. Start all the DataNodes in the cluster (run on master01)

         start-dfs.sh
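
     Once the DataNodes have registered, a report should count all three (output abbreviated):

         $ hdfs dfsadmin -report | grep 'Live datanodes'
         Live datanodes (3):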
    
  15. Start the ZKFC processes (run the following on both master01 and master02)

         hdfs --daemon start zkfc
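
     With both ZKFCs running, one NameNode is elected active and the other stays standby (which of nn1/nn2 wins the election can vary):

         $ hdfs haadmin -getServiceState nn1
         active
         $ hdfs haadmin -getServiceState nn2
         standby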
    
  16. Verify HDFS HA (stop the NameNode process on master01)

         hdfs --daemon stop namenode
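
     Within a few seconds the surviving NameNode should report active. The stopped NameNode can then be restarted and will rejoin as standby:

         $ hdfs haadmin -getServiceState nn2
         active
         $ hdfs --daemon start namenode   # back on master01; rejoins as standby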
    
  17. Start YARN on RM1 (run on master01)

        yarn --daemon start resourcemanager
    
  18. Start YARN on RM2 (run on master02)

         yarn --daemon start resourcemanager
    
  19. From any node, query the state of rm1 (expect active)

         yarn rmadmin -getServiceState rm1
    
  20. From any node, query the state of rm2 (expect standby)

          yarn rmadmin -getServiceState rm2
    
  21. Verify YARN HA (run on master01); the standby ResourceManager should take over as active

           yarn --daemon stop resourcemanager
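
     rm2 should now report active, after which rm1 can be restarted and will rejoin as standby. As an end-to-end smoke test, the bundled example job can be submitted (the jar path glob is an assumption, adjust to your install):

         $ yarn rmadmin -getServiceState rm2
         active
         $ yarn --daemon start resourcemanager   # back on master01
         $ yarn jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10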
    