# Hadoop 3.1.3 three-node cluster setup notes (hadoop1 / hadoop2 / hadoop3)
# Cluster layout:
#   hadoop1  192.168.38.150  jdk1.8
#   hadoop2  192.168.38.151  jdk1.8
#   hadoop3  192.168.38.152  jdk1.8
# Run on ALL 3 nodes: install SSH and pdsh (Hadoop's scripts use ssh/pdsh to
# fan out start/stop commands to the workers). -y makes yum non-interactive.
yum install -y ssh pdsh

# Run on ALL 3 nodes: map cluster hostnames to IPs.
# Appended via a heredoc instead of interactive vim so this is scriptable.
cat >> /etc/hosts <<'EOF'
192.168.38.150 hadoop1
192.168.38.151 hadoop2
192.168.38.152 hadoop3
EOF

# Set the hostname — run exactly ONE of these, on the matching node:
hostnamectl set-hostname hadoop1   # only on 192.168.38.150
hostnamectl set-hostname hadoop2   # only on 192.168.38.151
hostnamectl set-hostname hadoop3   # only on 192.168.38.152
# Run on ALL 3 nodes: unpack the Hadoop distribution.
tar -zxvf hadoop-3.1.3.tar.gz

# Run on ALL 3 nodes: configure environment variables.
# Appended to /etc/profile via heredoc (original used interactive vim).
cat >> /etc/profile <<'EOF'
# Hadoop 3.x refuses to start daemons as root unless these *_USER vars are set.
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export JAVA_HOME=/usr/local/jdk1.8
export HADOOP_HOME=/usr/local/hadoop-3.1.3
# NOTE: the original tutorial also prepended "." (current directory) to PATH;
# that is a security risk and is unnecessary, so it has been dropped.
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
EOF
source /etc/profile

# Verify the installation is on PATH and working.
hadoop version
# Set up passwordless SSH between every pair of nodes.
# Run this SAME sequence on each of hadoop1, hadoop2 and hadoop3
# (the original repeated it verbatim three times, once per host).
# -N '' -f ... makes key generation non-interactive (no passphrase prompt).
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
ssh-copy-id hadoop1
ssh-copy-id hadoop2
ssh-copy-id hadoop3
# Run on ALL 3 nodes: write core-site.xml.
# Heredoc replaces the interactive vim session; absolute path avoids
# depending on the current working directory.
cat > /usr/local/hadoop-3.1.3/etc/hadoop/core-site.xml <<'EOF'
<configuration>
  <!-- NameNode RPC address -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop1:8020</value>
  </property>
  <!-- Base directory for Hadoop data -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop-3.1.3/data</value>
  </property>
</configuration>
EOF
# Run on ALL 3 nodes: write hdfs-site.xml.
cat > /usr/local/hadoop-3.1.3/etc/hadoop/hdfs-site.xml <<'EOF'
<configuration>
  <!-- NameNode web UI address -->
  <property>
    <name>dfs.namenode.http-address</name>
    <value>hadoop1:9870</value>
  </property>
  <!-- SecondaryNameNode web UI address (runs on hadoop3) -->
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop3:9868</value>
  </property>
</configuration>
EOF
# Run on ALL 3 nodes: write yarn-site.xml.
cat > /usr/local/hadoop-3.1.3/etc/hadoop/yarn-site.xml <<'EOF'
<configuration>
  <!-- Site specific YARN configuration properties -->
  <!-- MapReduce shuffle service for the NodeManagers -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- ResourceManager runs on hadoop2 -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop2</value>
  </property>
</configuration>
EOF
# Run on ALL 3 nodes: write mapred-site.xml.
cat > /usr/local/hadoop-3.1.3/etc/hadoop/mapred-site.xml <<'EOF'
<configuration>
  <!-- Run MapReduce jobs on YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- AM/map/reduce containers need HADOOP_MAPRED_HOME set explicitly -->
  <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.1.3</value>
  </property>
  <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.1.3</value>
  </property>
  <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.1.3</value>
  </property>
</configuration>
EOF
# Run on ALL 3 nodes: list the worker hosts (DataNode / NodeManager).
# start-dfs.sh / start-yarn.sh read this file to know where to ssh.
cat > /usr/local/hadoop-3.1.3/etc/hadoop/workers <<'EOF'
hadoop1
hadoop2
hadoop3
EOF
# Run on ALL 3 nodes: pin JAVA_HOME and the config dir in hadoop-env.sh.
# Daemons launched over ssh do not source /etc/profile, so these must be
# set here as well.
cat >> /usr/local/hadoop-3.1.3/etc/hadoop/hadoop-env.sh <<'EOF'
export JAVA_HOME=/usr/local/jdk1.8
export HADOOP_CONF_DIR=/usr/local/hadoop-3.1.3/etc/hadoop
EOF
# On hadoop1 ONLY, first setup only: format the NameNode.
# WARNING: reformatting an existing cluster destroys all HDFS metadata.
hdfs namenode -format

# On hadoop1: start HDFS (NameNode + DataNodes + SecondaryNameNode).
start-dfs.sh
# NameNode web UI (was a bare URL in the original — not a shell command):
#   http://192.168.38.150:9870/

# On hadoop2 (the ResourceManager host): start YARN.
start-yarn.sh
# YARN ResourceManager web UI:
#   http://hadoop2:8088/

# Confirm the expected Java daemons are running on each node.
jps

# Smoke test: run the bundled WordCount example.
hadoop fs -mkdir /wcinput
hadoop fs -put ./word.txt /wcinput
# Use $HADOOP_HOME so this works regardless of the current directory
# (the original used a relative path that assumed cwd == $HADOOP_HOME).
hadoop jar "$HADOOP_HOME"/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar wordcount /wcinput /wcoutput
# Source page footer: Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。