The steps below also apply to Hadoop 3.0.0.
First, configure passwordless SSH from bigdata001 to every node:

ssh-copy-id hostname
This copies the local machine's id_rsa.pub into the remote server's .ssh/authorized_keys file (see the sketch after the table below).

The services are laid out across the three servers as follows:

Service | Installed on
---|---
Java 8 | bigdata001/2/3
NameNode | bigdata001
DataNode | bigdata002/3
ResourceManager | bigdata001
NodeManager | bigdata002/3
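A minimal end-to-end sketch of the passwordless-SSH setup, assuming root SSH login is allowed and all three hostnames resolve (e.g. via /etc/hosts):

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa   # skip if a key pair already exists
for host in bigdata001 bigdata002 bigdata003; do
    ssh-copy-id root@$host                 # appends id_rsa.pub to the remote ~/.ssh/authorized_keys
done
ssh root@bigdata002 hostname               # should print bigdata002 without a password prompt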
Run the following commands to download and extract Hadoop:
curl -O https://ftp.nluug.nl/internet/apache/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz
tar -zxvf hadoop-3.3.1.tar.gz
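Optionally, the downloaded tarball can be verified against its published checksum; a hedged sketch, assuming the .sha512 file is still served from the official Apache distribution path (older releases may have moved to archive.apache.org):

curl -O https://downloads.apache.org/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz.sha512
sha512sum -c hadoop-3.3.1.tar.gz.sha512   # expect: hadoop-3.3.1.tar.gz: OK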
Change into the Hadoop directory:
[root@bigdata001 opt]#
[root@bigdata001 opt]# cd hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]#
Create the pids and logs directories:
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# mkdir pids
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# mkdir logs
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# ls
bin etc include lib libexec LICENSE-binary licenses-binary LICENSE.txt logs NOTICE-binary NOTICE.txt pids README.txt sbin share
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]#
Edit the etc/hadoop/hadoop-env.sh file.
Modified section:
export JAVA_HOME=/opt/jdk1.8.0_201
export HADOOP_PID_DIR=/opt/hadoop-3.3.1/pids
export HADOOP_LOG_DIR=/opt/hadoop-3.3.1/logs
export HDFS_NAMENODE_USER=root
Added section (in Hadoop 3, the daemons refuse to start as root unless these *_USER variables are set):
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
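A quick sanity check that the JAVA_HOME set above actually points at a JDK (path taken from this guide; adjust to your install):

/opt/jdk1.8.0_201/bin/java -version   # should report java version "1.8.0_201"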
Edit etc/hadoop/core-site.xml.
Added section:
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://bigdata001:9000</value>
</property>
<property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
</property>
<property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
    <description>Hosts from which the Hadoop superuser root may impersonate users</description>
</property>
<property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
    <description>User groups that the Hadoop superuser root may impersonate</description>
</property>
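To confirm the values are being picked up, they can be queried from /opt/hadoop-3.3.1 (hdfs getconf reads the local configuration):

bin/hdfs getconf -confKey fs.defaultFS          # expect hdfs://bigdata001:9000
bin/hdfs getconf -confKey io.file.buffer.size   # expect 131072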
Edit etc/hadoop/hdfs-site.xml.
First create the namenode and datanode directories:
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# mkdir namenode
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# mkdir datanode
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]#
Added section:
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<property>
    <name>dfs.namenode.name.dir</name>
    <value>/opt/hadoop-3.3.1/namenode</value>
</property>
<property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
</property>
<property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>/opt/hadoop-3.3.1/datanode</value>
</property>
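For reference: dfs.blocksize = 268435456 = 256 × 1024 × 1024 bytes, i.e. a 256 MB HDFS block, and dfs.replication = 1 overrides the default of 3, so each block is kept on a single DataNode.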
Edit etc/hadoop/mapred-site.xml.
Added section:
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
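Note for Hadoop 3.x: if MapReduce jobs later fail with "Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster", a commonly used fix (an addition not in the original guide; paths match this install) is to also pin the MapReduce classpath here:

<!-- Hypothetical addition, not part of the original configuration -->
<property>
    <name>mapreduce.application.classpath</name>
    <value>/opt/hadoop-3.3.1/share/hadoop/mapreduce/*:/opt/hadoop-3.3.1/share/hadoop/mapreduce/lib/*</value>
</property>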
Edit etc/hadoop/yarn-site.xml.
First create the NodeManager local, log, and remote-app-log directories:
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# mkdir nm-local-dir
[root@bigdata001 hadoop-3.3.1]# mkdir nm-log-dir
[root@bigdata001 hadoop-3.3.1]# mkdir nm-remote-app-log-dir
[root@bigdata001 hadoop-3.3.1]#
Added section:
<property>
    <name>yarn.acl.enable</name>
    <value>false</value>
</property>
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>false</value>
</property>
<property>
    <name>yarn.resourcemanager.address</name>
    <value>${yarn.resourcemanager.hostname}:8032</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>${yarn.resourcemanager.hostname}:8030</value>
</property>
<property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>${yarn.resourcemanager.hostname}:8031</value>
</property>
<property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>${yarn.resourcemanager.hostname}:8033</value>
</property>
<property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>${yarn.resourcemanager.hostname}:8088</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>bigdata001</value>
</property>
<property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
</property>
<property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
</property>
<property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>8192</value>
</property>
<property>
    <name>yarn.resourcemanager.nodes.include-path</name>
    <value></value>
</property>
<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>8192</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
</property>
<property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/opt/hadoop-3.3.1/nm-local-dir</value>
</property>
<property>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/opt/hadoop-3.3.1/nm-log-dir</value>
</property>
<property>
    <name>yarn.nodemanager.log.retain-seconds</name>
    <value>10800</value>
</property>
<property>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/opt/hadoop-3.3.1/nm-remote-app-log-dir</value>
</property>
<property>
    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
    <value>logs</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ,HADOOP_MAPRED_HOME</value>
</property>
Edit etc/hadoop/workers, setting its contents to the worker (DataNode/NodeManager) hosts:
bigdata002
bigdata003
Distribute the configured Hadoop directory from bigdata001 to the other two servers:
[root@bigdata001 opt]# scp -r /opt/hadoop-3.3.1 root@bigdata002:/opt
[root@bigdata001 opt]# scp -r /opt/hadoop-3.3.1 root@bigdata003:/opt
Add the following to /etc/profile, then reload it:
export HADOOP_HOME=/opt/hadoop-3.3.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@bigdata001 ~]#
[root@bigdata001 ~]# source /etc/profile
[root@bigdata001 ~]#
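A quick check that the new PATH took effect:

which hadoop     # expect /opt/hadoop-3.3.1/bin/hadoop
hadoop version   # first line should read: Hadoop 3.3.1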
Initialize HDFS by formatting the NameNode:
[root@bigdata001 opt]#
[root@bigdata001 opt]# hdfs namenode -format
[root@bigdata001 opt]#
Start HDFS:
[root@bigdata001 opt]#
[root@bigdata001 opt]# start-dfs.sh
[root@bigdata001 opt]#
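To confirm the HDFS daemons started, a small check (jps ships with the JDK):

jps                    # bigdata001: NameNode (typically also SecondaryNameNode); bigdata002/3: DataNode
hdfs dfsadmin -report  # should report 2 live datanodes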
Start YARN:
[root@bigdata001 opt]#
[root@bigdata001 opt]# start-yarn.sh
[root@bigdata001 opt]#
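The NodeManagers can likewise be verified from bigdata001:

yarn node -list   # should list bigdata002 and bigdata003 in RUNNING state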
Visit http://bigdata001:8088 to reach the YARN ResourceManager web UI.
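As a final end-to-end smoke test, the examples jar bundled with the 3.3.1 tarball can run a small job (a sketch; if it cannot find MRAppMaster, see the mapred-site.xml classpath note above):

hadoop jar /opt/hadoop-3.3.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.1.jar pi 2 10
# a successful run ends with a line like: Estimated value of Pi is 3.80000000000000000000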