1. Upload the file hadoop.tar.gz
[root@node01 ~]# tar -zxvf hadoop.tar.gz
[root@node01 ~]# mv hadoop /opt/a/
[root@node01 ~]# cd /opt/a/hadoop/etc/hadoop/
2. Configure the cluster environment: [root@node01 hadoop]# vim hadoop-env.sh
## Append the following at the end of the file
export JAVA_HOME=/usr/java/jdk1.8
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_ZKFC_USER=root
export HDFS_JOURNALNODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
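Before moving on, it is worth confirming that the JAVA_HOME set above really points at a JDK (the /usr/java/jdk1.8 path is just where this guide installed it; adjust to your own location):
[root@node01 hadoop]# /usr/java/jdk1.8/bin/java -version    # should print a JDK 1.8 version string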
3. Edit the configuration files
[root@node01 hadoop]# vim core-site.xml
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://hdfs-a</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/var/a/hadoop/ha</value>
</property>
<property>
  <name>hadoop.http.staticuser.user</name>
  <value>root</value>
</property>
<property>
  <name>ha.zookeeper.quorum</name>
  <value>node01:2181,node02:2181,node03:2181</value>
</property>
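Note that the authority in fs.defaultFS (hdfs-a) must match the dfs.nameservices value defined in hdfs-site.xml below. Once the PATH is set in step 5, an optional cross-check is:
[root@node01 hadoop]# hdfs getconf -confKey fs.defaultFS          # expect hdfs://hdfs-a
[root@node01 hadoop]# hdfs getconf -confKey ha.zookeeper.quorum   # expect node01:2181,node02:2181,node03:2181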
[root@node01 hadoop]# vim hdfs-site.xml
<property>
  <name>dfs.nameservices</name>
  <value>hdfs-a</value>
</property>
<property>
  <name>dfs.ha.namenodes.hdfs-a</name>
  <value>nn1,nn2</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.hdfs-a.nn1</name>
  <value>node01:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.hdfs-a.nn2</name>
  <value>node02:8020</value>
</property>
<property>
  <name>dfs.namenode.http-address.hdfs-a.nn1</name>
  <value>node01:9870</value>
</property>
<property>
  <name>dfs.namenode.http-address.hdfs-a.nn2</name>
  <value>node02:9870</value>
</property>
<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://node01:8485;node02:8485;node03:8485/hdfs-a</value>
</property>
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/var/a/hadoop/ha/qjm</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.hdfs-a</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
  <name>dfs.ha.fencing.methods</name>
  <value>sshfence
shell(true)</value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value>/root/.ssh/id_rsa</value>
</property>
<property>
  <name>dfs.ha.automatic-failover.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
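The sshfence method logs into the failed NameNode over SSH during failover, so the private key listed above must allow passwordless root login between node01 and node02. A quick check, assuming the key pair has already been distributed:
[root@node01 hadoop]# ls -l /root/.ssh/id_rsa
[root@node01 hadoop]# ssh node02 hostname     # should return node02 without prompting for a password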
[root@node01 hadoop]# vim workers
node01
node02
node03
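The workers file tells start-dfs.sh where to launch DataNodes, so each hostname must resolve on every node. An optional sanity check:
[root@node01 hadoop]# for h in node01 node02 node03; do ping -c 1 $h > /dev/null && echo "$h ok"; done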
4. Copy the software to the other nodes
[root@node02 ~]# scp -r root@node01:/opt/a/hadoop /opt/a/
[root@node03 ~]# scp -r root@node01:/opt/a/hadoop /opt/a/
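An optional check that the copy landed intact on each node, using one of the config files uploaded above:
[root@node02 ~]# ls /opt/a/hadoop/etc/hadoop/hdfs-site.xml
[root@node03 ~]# ls /opt/a/hadoop/etc/hadoop/hdfs-site.xml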
5. Update the environment variables
[root@node01 hadoop]# vim /etc/profile
export HADOOP_HOME=/opt/a/hadoop
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
Copy the environment variables to the other hosts
[root@node01 a]# scp /etc/profile root@node02:/etc/profile
[root@node01 a]# scp /etc/profile root@node03:/etc/profile
Reload the environment variables on all three servers (run on node01, node02, and node03)
[123]# source /etc/profile
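An optional check that the PATH change took effect on each node:
[123]# hadoop version        # should print the Hadoop version banner
[123]# which hdfs            # should resolve to /opt/a/hadoop/bin/hdfs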
6. Start ZooKeeper first
[123]# zkServer.sh start
[123]# zkServer.sh status
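With a healthy three-node ensemble, zkServer.sh status should report Mode: leader on exactly one node and Mode: follower on the other two. If you prefer a client-side check, an optional way is to connect with the CLI:
[root@node01 ~]# zkCli.sh -server node01:2181
[zk: node01:2181(CONNECTED) 0] ls /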
7. Start the JournalNodes
[123]# hdfs --daemon start journalnode
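The NameNode format in the next step writes its edits to the JournalNodes, so make sure all three are running before continuing. An optional check:
[123]# jps    # each node should show a JournalNode process (plus QuorumPeerMain from ZooKeeper)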
8. Format the NameNode
[root@node01 a]# hdfs namenode -format
[root@node01 a]# hdfs --daemon start namenode
[root@node02 a]# hdfs namenode -bootstrapStandby
[root@node01 a]# hdfs zkfc -formatZK
[root@node01 a]# start-dfs.sh
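At this point one NameNode should be active and the other standby; the ZKFCs decide which via ZooKeeper. An optional check:
[root@node01 a]# hdfs haadmin -getServiceState nn1
[root@node01 a]# hdfs haadmin -getServiceState nn2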
9. Test the cluster
http://node01:9870
http://node02:9870
[root@node01 ~]# hdfs dfs -mkdir -p /a
[root@node01 ~]# hdfs dfs -put zookeeper-3.4.5.tar.gz /a/
[root@node01 ~]# hdfs dfs -D dfs.blocksize=1048576 -put zookeeper.tar.gz /a/
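The -D dfs.blocksize=1048576 option forces a 1 MB block size for that upload, so any file larger than 1 MB is split into several blocks. You can confirm the block layout of the file uploaded above with fsck:
[root@node01 ~]# hdfs fsck /a/zookeeper.tar.gz -files -blocks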
10. Shut down the cluster
[root@node01 ~]# stop-dfs.sh
[123]# zkServer.sh stop
[123]# shutdown -h now