vim /etc/hosts
- 192.168.200.154 master
- 192.168.200.155 worker1
- 192.168.200.156 worker2
ssh-copy-id master
ssh-copy-id worker1
ssh-copy-id worker2
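ssh-copy-id only distributes a key pair that already exists; if none has been generated yet, a minimal sketch (assuming a passphrase-less RSA key for unattended logins) is:
ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa
Then re-run the three ssh-copy-id commands above.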
yum install -y vim
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
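Note that setenforce 0 only disables SELinux until the next reboot; to keep it off permanently, the config file must be changed as well. A minimal sketch:
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config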
mkdir /usr/local/jdk
tar -zxf jdk-8u152-linux-x64.tar.gz -C /usr/local/jdk/
tar -zxf hadoop-3.3.1.tar.gz -C /usr/local/
mv /usr/local/hadoop-3.3.1/ /usr/local/hadoop
vim /etc/profile
- #JAVA_HOME
- export JAVA_HOME=/usr/local/jdk/jdk1.8.0_152/
- export PATH=$PATH:$JAVA_HOME/bin
-
- #Hadoop
- export HADOOP_HOME=/usr/local/hadoop/
- export PATH=$PATH:$HADOOP_HOME/bin
- export PATH=$PATH:$HADOOP_HOME/sbin
source /etc/profile
java -version
hadoop version
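If java -version or hadoop version is not found, check that the variables actually resolved; a quick sanity check:
echo $JAVA_HOME $HADOOP_HOME
which java hadoop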
vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh
- export JAVA_HOME=/usr/local/jdk/jdk1.8.0_152/
- export HDFS_NAMENODE_USER=root
- export HDFS_DATANODE_USER=root
- export HDFS_SECONDARYNAMENODE_USER=root
- export YARN_RESOURCEMANAGER_USER=root
- export YARN_NODEMANAGER_USER=root
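Hadoop 3.x refuses to start the HDFS/YARN daemons as root unless the *_USER variables above are set, so it is worth confirming they were saved before moving on:
grep -E 'JAVA_HOME|_USER' /usr/local/hadoop/etc/hadoop/hadoop-env.sh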
cd /usr/local/hadoop/etc/hadoop/
vim core-site.xml
- <configuration>
- <property>
- <name>fs.defaultFS</name>
- <value>hdfs://master:9000</value>
- </property>
- <!-- Location for temporary files -->
- <property>
- <name>hadoop.tmp.dir</name>
- <value>/usr/local/hadoop/tmp</value>
- </property>
- </configuration>
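hadoop.tmp.dir is normally created on demand, but creating it up front avoids permission surprises; a sketch, assuming Hadoop lives at the same path on every node:
mkdir -p /usr/local/hadoop/tmp
ssh worker1 mkdir -p /usr/local/hadoop/tmp
ssh worker2 mkdir -p /usr/local/hadoop/tmp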
vim hdfs-site.xml
- <property>
- <name>dfs.replication</name>
- <value>2</value>
- </property>
- <!-- Where the NameNode stores its data; older versions used dfs.name.dir -->
- <property>
- <name>dfs.namenode.name.dir</name>
- <value>/usr/local/hadoop/name</value>
- </property>
- <!-- Where the DataNode stores its data; older versions used dfs.data.dir -->
- <property>
- <name>dfs.datanode.data.dir</name>
- <value>/usr/local/hadoop/data</value>
- </property>
- <!-- Disable HDFS permission checks (e.g. when uploading files) -->
- <property>
- <name>dfs.permissions.enabled</name>
- <value>false</value>
- </property>
- <!-- Which node the NameNode web UI runs on; the default is 0.0.0.0:9870 (in Hadoop 3.x the port changed from 50070 to 9870) -->
- <property>
- <name>dfs.namenode.http-address</name>
- <value>master:9870</value>
- </property>
- <!-- Which node the SecondaryNameNode runs on; the default is 0.0.0.0:9868 -->
- <property>
- <name>dfs.namenode.secondary.http-address</name>
- <value>master:9868</value>
- </property>
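To confirm the XML parses and the values are picked up (this works before any daemon is running), hdfs getconf can read keys back:
hdfs getconf -confKey dfs.replication              # expect 2
hdfs getconf -confKey dfs.namenode.http-address    # expect master:9870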
vim yarn-site.xml
- <property>
- <name>yarn.resourcemanager.hostname</name>
- <value>master</value>
- </property>
- <!-- How the NodeManager transfers data (the shuffle service) -->
- <property>
- <name>yarn.nodemanager.aux-services</name>
- <value>mapreduce_shuffle</value>
- </property>
- <!-- Disable the virtual-memory check -->
- <property>
- <name>yarn.nodemanager.vmem-check-enabled</name>
- <value>false</value>
- </property>
vim mapred-site.xml
- <property>
- <name>mapreduce.framework.name</name>
- <value>yarn</value>
- </property>
- <!-- If the property above is set, this one must be set too, or MapReduce jobs will fail to find their main class; it is the CLASSPATH for MR applications -->
- <property>
- <name>mapreduce.application.classpath</name>
- <value>/usr/local/hadoop/share/hadoop/mapreduce/*:/usr/local/hadoop/share/hadoop/mapreduce/lib/*</value>
- </property>
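The wildcard value above assumes the default layout under /usr/local/hadoop. If a job still reports that it cannot find or load its main class, inspect the classpath Hadoop itself computes and adapt that value instead:
hadoop classpath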
vim workers
- worker1
- worker2
scp /usr/local/hadoop/etc/hadoop/core-site.xml worker1:/usr/local/hadoop/etc/hadoop/core-site.xml
scp /usr/local/hadoop/etc/hadoop/core-site.xml worker2:/usr/local/hadoop/etc/hadoop/core-site.xml
scp /usr/local/hadoop/etc/hadoop/hdfs-site.xml worker2:/usr/local/hadoop/etc/hadoop/hdfs-site.xml
scp /usr/local/hadoop/etc/hadoop/hdfs-site.xml worker1:/usr/local/hadoop/etc/hadoop/hdfs-site.xml
scp /usr/local/hadoop/etc/hadoop/yarn-site.xml worker2:/usr/local/hadoop/etc/hadoop/yarn-site.xml
scp /usr/local/hadoop/etc/hadoop/yarn-site.xml worker1:/usr/local/hadoop/etc/hadoop/yarn-site.xml
scp /usr/local/hadoop/etc/hadoop/mapred-site.xml worker1:/usr/local/hadoop/etc/hadoop/mapred-site.xml
scp /usr/local/hadoop/etc/hadoop/mapred-site.xml worker2:/usr/local/hadoop/etc/hadoop/mapred-site.xml
scp /usr/local/hadoop/etc/hadoop/workers worker1:/usr/local/hadoop/etc/hadoop/workers
scp /usr/local/hadoop/etc/hadoop/workers worker2:/usr/local/hadoop/etc/hadoop/workers
scp /usr/local/hadoop/etc/hadoop/hadoop-env.sh worker1:/usr/local/hadoop/etc/hadoop/hadoop-env.sh
scp /usr/local/hadoop/etc/hadoop/hadoop-env.sh worker2:/usr/local/hadoop/etc/hadoop/hadoop-env.sh
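The scp commands above can be condensed into a single loop over the workers; a sketch assuming the same file list and paths:
for node in worker1 worker2; do
    scp /usr/local/hadoop/etc/hadoop/{core-site.xml,hdfs-site.xml,yarn-site.xml,mapred-site.xml,workers,hadoop-env.sh} $node:/usr/local/hadoop/etc/hadoop/
done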
hdfs namenode -format
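Run the format on master only, and only once: each format generates a new clusterID, and DataNodes that kept the old ID will refuse to register. If a clean re-format is ever unavoidable, wipe the data directories on every node first; a cautious sketch (this destroys all HDFS data):
rm -rf /usr/local/hadoop/name /usr/local/hadoop/data /usr/local/hadoop/tmp
ssh worker1 rm -rf /usr/local/hadoop/data /usr/local/hadoop/tmp
ssh worker2 rm -rf /usr/local/hadoop/data /usr/local/hadoop/tmp
hdfs namenode -format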
start-all.sh
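To confirm the daemons came up, run jps on each node (log in to the workers directly if jps is not on the non-interactive ssh PATH); with this layout the expected set is:
jps               # master: NameNode, SecondaryNameNode, ResourceManager
ssh worker1 jps   # workers: DataNode, NodeManager
ssh worker2 jps
The HDFS web UI should then answer at http://master:9870 and the YARN ResourceManager UI at http://master:8088.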