赞
踩
- <!-- 指定hdfs的nameservice为ns1 -->
- <property>
- <name>fs.defaultFS</name>
- <value>hdfs://ns1/</value>
- </property>
- <!-- 指定hadoop临时目录 -->
- <property>
- <name>hadoop.tmp.dir</name>
- <value>/usr/apps/ha-hadoop/hadoop-2.7.1/data</value>
- </property>
-
- <!-- 指定客户端访问zookeeper的地址 -->
- <property>
- <name>ha.zookeeper.quorum</name>
- <value>master:2181,slave1:2181,slave2:2181</value>
- </property>
- <!-- 指定用户为root -->
- <property>
- <name>hadoop.http.staticuser.user</name>
- <value>root</value>
- </property>
- <property>
- <name>dfs.nameservices</name>
- <value>ns1</value>
- </property>
- <!-- ns1下面有两个NameNode,分别是nn1,nn2 -->
- <property>
- <name>dfs.ha.namenodes.ns1</name>
- <value>nn1,nn2</value>
- </property>
- <!-- nn1的RPC通信地址 -->
- <property>
- <name>dfs.namenode.rpc-address.ns1.nn1</name>
- <value>master:9000</value>
- </property>
- <!-- nn2的RPC通信地址 -->
- <property>
- <name>dfs.namenode.rpc-address.ns1.nn2</name>
- <value>slave1:9000</value>
- </property>
- <!-- nn1的http通信地址 -->
- <property>
- <name>dfs.namenode.http-address.ns1.nn1</name>
- <value>master:50070</value>
- </property>
- <!-- nn2的http通信地址 -->
- <property>
- <name>dfs.namenode.http-address.ns1.nn2</name>
- <value>slave1:50070</value>
- </property>
- <!-- namenode存储路径 -->
- <property>
- <name>dfs.namenode.name.dir</name>
- <value>file:///usr/apps/ha-hadoop/hadoop-2.7.1/data/dfs/nn/name</value>
- </property>
- <!-- datanode存储路径 -->
- <property>
- <name>dfs.datanode.data.dir</name>
- <value>file:///usr/apps/ha-hadoop/hadoop-2.7.1/data/dfs/dn</value>
- </property>
- <!-- 指定NameNode的元数据在JournalNode上的存放位置 -->
- <property>
- <name>dfs.namenode.shared.edits.dir</name>
- <value>qjournal://master:8485;slave1:8485;slave2:8485/ns1</value>
- </property>
- <!--指定JournalNode在本地磁盘存放数据的位置 -->
- <property>
- <name>dfs.journalnode.edits.dir</name>
- <value>/usr/apps/ha-hadoop/hadoop-2.7.1/journaldata</value>
- </property>
- <!-- 开启NameNode失败自动切换 -->
- <property>
- <name>dfs.ha.automatic-failover.enabled</name>
- <value>true</value>
- </property>
- <!-- 配置失败自动切换实现方式 -->
- <property>
- <name>dfs.client.failover.proxy.provider.ns1</name>
- <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
- </property>
- <!--配置隔离机制方法,多个机制用换行分割,即每个机制暂用一行-->
- <property>
- <name>dfs.ha.fencing.methods</name>
- <value>sshfence</value>
- </property>
- <!-- 使用sshfence隔离机制时需要ssh免登陆-->
- <property>
- <name>dfs.ha.fencing.ssh.private-key-files</name>
- <value>/root/.ssh/id_rsa</value>
- </property>
- <!--配置sshfence隔离机制超时时间 -->
- <property>
- <name>dfs.ha.fencing.ssh.connect-timeout</name>
- <value>30000</value>
- </property>
- <!-- 指定mr运行在yarn上 -->
- <property>
- <name>mapreduce.framework.name</name>
- <value>yarn</value>
- </property>
- <property>
- <name>mapreduce.jobhistory.address</name>
- <value>master:10020</value>
- </property>
-
-
- <property>
- <name>mapreduce.jobhistory.webapp.address</name>
- <value>master:19888</value>
- </property>
- <property>
- <name>mapreduce.job.ubertask.enable</name>
- <value>true</value>
- </property>
-
- <!-- 开启RM高可用 -->
- <property>
- <name>yarn.resourcemanager.ha.enabled</name>
- <value>true</value>
- </property>
- <!-- 指定RM的cluster id -->
- <property>
- <name>yarn.resourcemanager.cluster-id</name>
- <value>yarncluster</value>
- </property>
-
- <!-- 指定RM的名字 -->
- <property>
- <name>yarn.resourcemanager.ha.rm-ids</name>
- <value>rm1,rm2</value>
- </property>
- <!-- 分别指定RM的地址 -->
- <property>
- <name>yarn.resourcemanager.hostname.rm1</name>
- <value>master</value>
- </property>
-
- <property>
- <name>yarn.resourcemanager.hostname.rm2</name>
- <value>slave1</value>
- </property>
- <!-- 指定zk集群地址 -->
- <property>
- <name>yarn.resourcemanager.zk-address</name>
- <value>master:2181,slave1:2181,slave2:2181</value>
- </property>
- <property>
- <name>yarn.nodemanager.aux-services</name>
- <value>mapreduce_shuffle</value>
- </property>
- <property>
- <name>yarn.log-aggregation-enable</name>
- <value>true</value>
- </property>
- <property>
- <name>yarn.log-aggregation.retain-seconds</name>
- <value>604800</value>
- </property>
注:需要配置JAVA_HOME;HADOOP_HOME;ZOOKEEPER_HOME和对应的PATH
scp -r ha-hadoop/ slave2:/usr/apps/ha-hadoop
scp -r /etc/profile slave2:/etc
ssh免密的私钥:scp -r id_rsa slave2:/root/.ssh
注意:修改profile文件,需要source /etc/profile
进入zookeeper目录:bin/zkServer.sh start
或者
需要配置zookeeper环境变量:sh zkServer.sh start
进入hadoop安装目录:sbin/hadoop-daemon.sh start journalnode
(启动datanode:sbin/hadoop-daemon.sh start datanode)
在主节点master上执行:bin/hdfs namenode -format
启用高可用把主节点生成的data文件夹传到slave1中:scp -r data slave1:/usr/apps/ha-hadoop/hadoop-2.7.1/
master上格式化ZKFC:bin/hdfs zkfc -formatZK
注:中间报错,可能是hdfs-site.xml配置错误
启动dfs,可以看到两个namenode启动:sbin/start-dfs.sh
yarn的高可用性:
主节点master:sbin/start-yarn.sh
slave1节点: sbin/yarn-daemon.sh start resourcemanager
验证:一个IP是standby,另一个是active,可以kill掉active的namenode,HDFS可以正常访问
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。