
Distributed Cluster Installation of Hadoop 3.3.1 on CentOS 7


The same steps also apply to Hadoop 3.0.0.

1. Cluster Planning

  • Configure passwordless SSH login between every pair of servers, and make sure authorized_keys has permission 600. You can use ssh-copy-id hostname to append the local id_rsa.pub to the remote server's .ssh/authorized_keys (a key-distribution sketch follows the table below).
Service          Installed on
Java 8           bigdata001/2/3
NameNode         bigdata001
DataNode         bigdata002/3
ResourceManager  bigdata001
NodeManager      bigdata002/3
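
A minimal sketch of the key distribution, assuming the three hostnames already resolve (for example via /etc/hosts) and that everything runs as root:

ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa     # generate a key pair if none exists yet
for host in bigdata001 bigdata002 bigdata003; do
    ssh-copy-id "$host"                      # appends id_rsa.pub to the remote .ssh/authorized_keys
done
chmod 600 ~/.ssh/authorized_keys             # sshd ignores the file if its permissions are too open

Run this on each of the three servers so every server can log in to every other without a password.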

2. Download (on bigdata001)

Run the following commands to download and extract the release:

curl -O https://ftp.nluug.nl/internet/apache/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz

tar -zxvf hadoop-3.3.1.tar.gz
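
Optionally, verify the archive against the SHA-512 checksum Apache publishes next to each release (the URL below assumes Apache's usual archive layout):

curl -O https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz.sha512
sha512sum hadoop-3.3.1.tar.gz    # the digest must match the one in the .sha512 file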

Change into the hadoop directory:

[root@bigdata001 opt]# 
[root@bigdata001 opt]# cd hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]# 

3. Configuration File Changes (on bigdata001)

3.1 hadoop-env.sh

Create the pids and logs directories:

[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# mkdir pids				
[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# mkdir logs				
[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# ls
bin  etc  include  lib  libexec  LICENSE-binary  licenses-binary  LICENSE.txt  logs  NOTICE-binary  NOTICE.txt  pids  README.txt  sbin  share
[root@bigdata001 hadoop-3.3.1]#
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]#

Edit the etc/hadoop/hadoop-env.sh file.

Entries to modify:

export JAVA_HOME=/opt/jdk1.8.0_201       
export HADOOP_PID_DIR=/opt/hadoop-3.3.1/pids				
export HADOOP_LOG_DIR=/opt/hadoop-3.3.1/logs				

export HDFS_NAMENODE_USER=root

Entries to add:

export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
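
If you prefer to script these changes, a minimal sketch that appends all eight exports (this works because the stock hadoop-env.sh either omits these settings or ships them commented out, so the appended values take effect):

cat >> /opt/hadoop-3.3.1/etc/hadoop/hadoop-env.sh <<'EOF'
export JAVA_HOME=/opt/jdk1.8.0_201
export HADOOP_PID_DIR=/opt/hadoop-3.3.1/pids
export HADOOP_LOG_DIR=/opt/hadoop-3.3.1/logs
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
EOF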

3.2 core-site.xml

Edit etc/hadoop/core-site.xml.

Entries to add:

    <property>
        <name>fs.defaultFS</name>		
        <value>hdfs://bigdata001:9000</value>
    </property>
    
    <property>
        <name>io.file.buffer.size</name>				
        <value>131072</value>
    </property>

    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
        <description>Hosts from which the superuser root is allowed to impersonate other users</description>
    </property>

    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
        <description>User groups that the superuser root is allowed to impersonate</description>
    </property>
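
To confirm the value is picked up, hdfs getconf reads only the configuration files, so it can run before any daemon is started:

/opt/hadoop-3.3.1/bin/hdfs getconf -confKey fs.defaultFS    # should print hdfs://bigdata001:9000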



3.3 hdfs-site.xml

Edit etc/hadoop/hdfs-site.xml.

Create the namenode and datanode directories:

[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# mkdir namenode
[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# mkdir datanode
[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]# 

Entries to add (a dfs.blocksize of 268435456 bytes is 256 MB, and dfs.replication of 1 keeps a single copy of each block, which suits a test cluster but not production):

    <property>
        <name>dfs.replication</name>			
        <value>1</value>
    </property>

    <property>
        <name>dfs.namenode.name.dir</name>				
        <value>/opt/hadoop-3.3.1/namenode</value>
    </property>
	
    <property>
        <name>dfs.blocksize</name>					
        <value>268435456</value>
    </property>

    <property>
        <name>dfs.namenode.handler.count</name>			
        <value>100</value>
    </property>
    
    <property>
        <name>dfs.datanode.data.dir</name>					
        <value>/opt/hadoop-3.3.1/datanode</value>
    </property>



3.4 mapred-site.xml

Edit etc/hadoop/mapred-site.xml.

Entries to add:

    <property>
        <name>mapreduce.framework.name</name>		
        <value>yarn</value>
    </property>
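
With only mapreduce.framework.name set, MapReduce jobs on Hadoop 3 can fail with "Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster". The Hadoop setup documentation suggests also telling the ApplicationMaster and the map/reduce tasks where the MapReduce installation lives, for example:

    <property>
        <name>yarn.app.mapreduce.am.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop-3.3.1</value>
    </property>

    <property>
        <name>mapreduce.map.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop-3.3.1</value>
    </property>

    <property>
        <name>mapreduce.reduce.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop-3.3.1</value>
    </property>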

3.5 yarn-site.xml

Edit etc/hadoop/yarn-site.xml. First create the NodeManager local-dir, log-dir, and remote-app-log directories:

[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# pwd
/opt/hadoop-3.3.1
[root@bigdata001 hadoop-3.3.1]# 
[root@bigdata001 hadoop-3.3.1]# mkdir nm-local-dir
[root@bigdata001 hadoop-3.3.1]# mkdir nm-log-dir
[root@bigdata001 hadoop-3.3.1]# mkdir nm-remote-app-log-dir
[root@bigdata001 hadoop-3.3.1]# 

Entries to add:


    <property>
        <name>yarn.acl.enable</name>				
        <value>false</value>
    </property>

    <property>
        <name>yarn.log-aggregation-enable</name>			
        <value>false</value>
    </property>

    <property>
        <name>yarn.resourcemanager.address</name>					
        <value>${yarn.resourcemanager.hostname}:8032</value>
    </property>
	
	
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>			
        <value>${yarn.resourcemanager.hostname}:8030</value>
    </property>
	
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>			
        <value>${yarn.resourcemanager.hostname}:8031</value>
    </property>
	
    <property>
        <name>yarn.resourcemanager.admin.address</name>			
        <value>${yarn.resourcemanager.hostname}:8033</value>
    </property>

    <property>
        <name>yarn.resourcemanager.webapp.address</name>		
        <value>${yarn.resourcemanager.hostname}:8088</value>
    </property>
	
    <property>
        <name>yarn.resourcemanager.hostname</name>   			
        <value>bigdata001</value>
    </property>
	
    <property>
        <name>yarn.resourcemanager.scheduler.class</name>			
        <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
    </property>
	
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>			
        <value>1024</value>
    </property>
	
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>			
        <value>8192</value>
    </property>
	
    <property>
        <name>yarn.resourcemanager.nodes.include-path</name>			
        <value></value>
    </property>
	
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>			
        <value>8192</value>
    </property>

    <property>
        <name>yarn.nodemanager.vmem-pmem-ratio</name>				
        <value>2.1</value>
    </property>
	
    <property>
        <name>yarn.nodemanager.local-dirs</name>				
        <value>/opt/hadoop-3.3.1/nm-local-dir</value>
    </property>
	
    <property>
        <name>yarn.nodemanager.log-dirs</name>				
        <value>/opt/hadoop-3.3.1/nm-log-dir</value>
    </property>
	
    <property>
        <name>yarn.nodemanager.log.retain-seconds</name>				
        <value>10800</value>
    </property>
	
    <property>
        <name>yarn.nodemanager.remote-app-log-dir</name>			
        <value>/opt/hadoop-3.3.1/nm-remote-app-log-dir</value>
    </property>
	
    <property>
        <name>yarn.nodemanager.remote-app-log-dir-suffix</name>			
        <value>logs</value>
    </property>

    <property>
        <name>yarn.nodemanager.aux-services</name>			
        <value>mapreduce_shuffle</value>
    </property>

    <property>
        <name>yarn.nodemanager.env-whitelist</name>				
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ,HADOOP_MAPRED_HOME</value>
    </property>
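
After saving, Hadoop's conftest subcommand can validate configuration XML files, which catches malformed edits early (it checks the file's structure only, not whether the values make sense):

/opt/hadoop-3.3.1/bin/hadoop conftest -conffile /opt/hadoop-3.3.1/etc/hadoop/yarn-site.xml    # reports whether the file is valid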


3.6 Edit the workers file

Replace the contents of etc/hadoop/workers with the worker hostnames:

bigdata002
bigdata003

4. Distribute the hadoop Directory (on bigdata001)

Copy the configured hadoop directory from bigdata001 to the other two servers:

[root@bigdata001 opt]# scp -r /opt/hadoop-3.3.1 root@bigdata002:/opt
[root@bigdata001 opt]# scp -r /opt/hadoop-3.3.1 root@bigdata003:/opt

5. Initialization and Startup (on bigdata001)

5.1 Add environment variables

  1. Add the following to /etc/profile:
export HADOOP_HOME=/opt/hadoop-3.3.1

export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
  2. Make the environment variables take effect:
[root@bigdata001 ~]# 
[root@bigdata001 ~]# source /etc/profile
[root@bigdata001 ~]#
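
A quick check that the PATH change took effect; hadoop version only prints build information, so it is safe to run at any time:

hadoop version    # should report Hadoop 3.3.1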

5.2 HDFS

Format HDFS (a one-time initialization):

[root@bigdata001 opt]#
[root@bigdata001 opt]# hdfs namenode -format		
[root@bigdata001 opt]#

Start HDFS:

[root@bigdata001 opt]#
[root@bigdata001 opt]# start-dfs.sh										
[root@bigdata001 opt]#
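
To confirm the HDFS daemons came up, the JDK's jps tool lists the running Java processes (a sketch; non-interactive ssh may need the full path to jps if the remote PATH lacks the JDK):

jps                   # on bigdata001: NameNode and, by default, SecondaryNameNode
ssh bigdata002 jps    # on each worker, expect a DataNode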
Visit http://bigdata001:9870 for the HDFS web UI.

[Screenshot: HDFS page]

5.3 YARN

Start YARN:

[root@bigdata001 opt]# 
[root@bigdata001 opt]# start-yarn.sh													
[root@bigdata001 opt]#
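
The same jps check applies to YARN (expect the ResourceManager on bigdata001 and a NodeManager on each worker):

jps                   # on bigdata001: ResourceManager
ssh bigdata002 jps    # on bigdata002/3: NodeManager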

Visit http://bigdata001:8088 for the YARN web UI.

[Screenshot: YARN page]
