
[Cloud Computing] Hadoop 2 (1)

Last login: Wed Mar 27 11:52:21 2024 from 192.168.196.1
[root@hadoop01 ~]#


Generate keys on hadoop02 and hadoop03 following the steps above, send them to the other two machines, and then test passwordless SSH login. `(omitted here; a sketch of the typical commands follows)`
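For reference, the key-generation and distribution steps referred to above typically look like this (a sketch, not the author's exact transcript; IPs follow the cluster layout used in this article, hadoop01 = .71, hadoop02 = .72, hadoop03 = .73):

```bash
# On hadoop02 (repeat on hadoop03): generate a key pair, accepting the defaults
[root@hadoop02 ~]# ssh-keygen -t rsa
# Install the public key on the other two machines
[root@hadoop02 ~]# ssh-copy-id root@192.168.196.71
[root@hadoop02 ~]# ssh-copy-id root@192.168.196.73
# Verify passwordless login
[root@hadoop02 ~]# ssh root@192.168.196.71
```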



> 
> ### Hadoop installation and configuration
> 
> 
> 



Create a directory to hold Hadoop:
[root@hadoop01 ~]# mkdir -p /export/servers

Change to the directory containing the tarball:
[root@hadoop01 ~]# cd /home/cps/Desktop/
[root@hadoop01 Desktop]# ls
hadoop-2.7.1.tar.gz

Extract it:
[root@hadoop01 Desktop]# tar -zxvf hadoop-2.7.1.tar.gz -C /export/servers/

Confirm the extracted location:
[root@hadoop01 hadoop-2.7.1]# pwd
/export/servers/hadoop-2.7.1

Configure the environment:
[root@hadoop01 hadoop-2.7.1]# vi /etc/profile

Append:
export HADOOP_HOME=/export/servers/hadoop-2.7.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

Reload it:
[root@hadoop01 hadoop-2.7.1]# source /etc/profile

Check the Hadoop version:
[root@hadoop01 hadoop-2.7.1]# hadoop version
Hadoop 2.7.1
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r 15ecc87ccf4a0228f35af08fc56de536e6ce657a
Compiled by jenkins on 2015-06-29T06:04Z
Compiled with protoc 2.5.0
From source with checksum fc0a1a23fc1868e4d5ee7fa2b28a58a
This command was run using /export/servers/hadoop-2.7.1/share/hadoop/common/hadoop-common-2.7.1.jar
[root@hadoop01 hadoop-2.7.1]#


Install and configure the other two machines the same way. `(omitted here; an alternative sketch follows)`
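As an alternative to re-extracting the tarball on each node, the installation directory can simply be copied over (a sketch; it assumes /export/servers already exists on the targets, and /etc/profile on each node still needs the same HADOOP_HOME/PATH lines appended, followed by `source /etc/profile`):

```bash
# Copy the extracted installation to hadoop02 and hadoop03
[root@hadoop01 ~]# scp -r /export/servers/hadoop-2.7.1 root@192.168.196.72:/export/servers/
[root@hadoop01 ~]# scp -r /export/servers/hadoop-2.7.1 root@192.168.196.73:/export/servers/
```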


#### 0. Enter the configuration directory etc/hadoop/ on the master node



[root@hadoop01 ~]# cd /export/servers/hadoop-2.7.1/etc/hadoop/
[root@hadoop01 hadoop]# ll
total 156
-rw-r--r--. 1 10021 10021 4436 Jun 28 2015 capacity-scheduler.xml
-rw-r--r--. 1 10021 10021 1335 Jun 28 2015 configuration.xsl
-rw-r--r--. 1 10021 10021 318 Jun 28 2015 container-executor.cfg
-rw-r--r--. 1 10021 10021 1123 Mar 29 01:12 core-site.xml
-rw-r--r--. 1 10021 10021 3670 Jun 28 2015 hadoop-env.cmd
-rw-r--r--. 1 10021 10021 4240 Mar 29 01:20 hadoop-env.sh
-rw-r--r--. 1 10021 10021 2598 Jun 28 2015 hadoop-metrics2.properties
-rw-r--r--. 1 10021 10021 2490 Jun 28 2015 hadoop-metrics.properties
-rw-r--r--. 1 10021 10021 9683 Jun 28 2015 hadoop-policy.xml
-rw-r--r--. 1 10021 10021 1132 Mar 29 01:21 hdfs-site.xml
-rw-r--r--. 1 10021 10021 1449 Jun 28 2015 httpfs-env.sh
-rw-r--r--. 1 10021 10021 1657 Jun 28 2015 httpfs-log4j.properties
-rw-r--r--. 1 10021 10021 21 Jun 28 2015 httpfs-signature.secret
-rw-r--r--. 1 10021 10021 620 Jun 28 2015 httpfs-site.xml
-rw-r--r--. 1 10021 10021 3518 Jun 28 2015 kms-acls.xml
-rw-r--r--. 1 10021 10021 1527 Jun 28 2015 kms-env.sh
-rw-r--r--. 1 10021 10021 1631 Jun 28 2015 kms-log4j.properties
-rw-r--r--. 1 10021 10021 5511 Jun 28 2015 kms-site.xml
-rw-r--r--. 1 10021 10021 11237 Jun 28 2015 log4j.properties
-rw-r--r--. 1 10021 10021 951 Jun 28 2015 mapred-env.cmd
-rw-r--r--. 1 10021 10021 1431 Mar 29 01:28 mapred-env.sh
-rw-r--r--. 1 10021 10021 4113 Jun 28 2015 mapred-queues.xml.template
-rw-r--r--. 1 root root 950 Mar 29 01:29 mapred-site.xml
-rw-r--r--. 1 10021 10021 758 Jun 28 2015 mapred-site.xml.template
-rw-r--r--. 1 10021 10021 10 Jun 28 2015 slaves
-rw-r--r--. 1 10021 10021 2316 Jun 28 2015 ssl-client.xml.example
-rw-r--r--. 1 10021 10021 2268 Jun 28 2015 ssl-server.xml.example
-rw-r--r--. 1 10021 10021 2250 Jun 28 2015 yarn-env.cmd
-rw-r--r--. 1 10021 10021 4585 Mar 29 01:23 yarn-env.sh
-rw-r--r--. 1 10021 10021 933 Mar 29 01:25 yarn-site.xml


#### 1. Core configuration: core-site.xml



[root@hadoop01 hadoop]# vi core-site.xml

<!-- Default filesystem: the NameNode address -->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.196.71:9000</value>
</property>

<!-- Base directory for Hadoop runtime files -->
<property>
    <name>hadoop.tmp.dir</name>
    <value>/export/servers/hadoop-2.7.1/tmp</value>
</property>

#### 2. HDFS configuration: hadoop-env.sh




[root@hadoop01 hadoop]# vi hadoop-env.sh

Set JAVA_HOME:

# The java implementation to use.
export JAVA_HOME=/usr/java/jdk1.8.0_281-amd64


#### 3. HDFS configuration: hdfs-site.xml




[root@hadoop01 hadoop]# vi hdfs-site.xml

<!-- Number of HDFS block replicas -->
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>

<!-- IP and port of the host running the secondary NameNode -->
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>192.168.196.73:50090</value>
</property>
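To confirm the settings so far are actually being picked up, `hdfs getconf` can query individual keys; with the values above it should print the configured NameNode address and replication factor:

```bash
[root@hadoop01 hadoop]# hdfs getconf -confKey fs.defaultFS
hdfs://192.168.196.71:9000
[root@hadoop01 hadoop]# hdfs getconf -confKey dfs.replication
3
```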

#### 4. YARN configuration: yarn-env.sh




Set JAVA_HOME, using the same JDK path as in hadoop-env.sh; a minimal sketch follows.
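A minimal sketch (in a stock Hadoop 2.7 yarn-env.sh the export line is commented out; uncomment it and point it at the same JDK used above):

```bash
[root@hadoop01 hadoop]# vi yarn-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_281-amd64
```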


#### 5. YARN configuration: yarn-site.xml




[root@hadoop01 hadoop]# vi yarn-site.xml

<!-- Host running the ResourceManager -->
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>192.168.196.72</value>
</property>

<!-- Auxiliary shuffle service required to run MapReduce on YARN -->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>

#### 6. MapReduce configuration: mapred-env.sh




Set JAVA_HOME here as well, with the same JDK path; a sketch follows.
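Likewise a sketch for mapred-env.sh, reusing the JDK path from hadoop-env.sh:

```bash
[root@hadoop01 hadoop]# vi mapred-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_281-amd64
```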


#### 7. MapReduce configuration: mapred-site.xml




Copy the template:
[root@hadoop01 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@hadoop01 hadoop]# vi mapred-site.xml

<!-- Run MapReduce on the YARN framework -->
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>


> 
> ### Distributing the configuration files across the cluster
> 
> 
> 


#### 1. The distribution script xsync




[root@hadoop01 bin]# pwd
/bin
[root@hadoop01 bin]# vi xsync




#!/bin/bash
# Get the number of arguments; exit immediately if none were given
pcount=$#
if ((pcount == 0)); then
echo no args;
exit;
fi

# Get the file name
p1=$1
fname=$(basename $p1)
echo fname=$fname

# Resolve the parent directory to an absolute path
pdir=$(cd -P $(dirname $p1); pwd)
echo pdir=$pdir

# Get the current user name
user=$(whoami)

# Loop over the other nodes (hadoop02 = .72, hadoop03 = .73)
for ((host=72; host<74; host++)); do
echo ====== rsync -rvl $pdir/$fname $user@192.168.196.$host:$pdir ======
rsync -rvl $pdir/$fname $user@192.168.196.$host:$pdir
done




[root@hadoop01 bin]# chmod 777 xsync
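Since the script lives in /bin it is already on the PATH. As a quick test run, any file or directory works; for example, syncing /etc/profile would also carry the HADOOP_HOME settings appended earlier to the other nodes (a sketch, not part of the original walkthrough):

```bash
[root@hadoop01 bin]# xsync /etc/profile
```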


#### 2. Distributing the configuration




[root@hadoop01 bin]# cd /export/servers/hadoop-2.7.1/etc/

[root@hadoop01 etc]# xsync hadoop/
fname=hadoop
pdir=/export/servers/hadoop-2.7.1/etc
====== rsync -rvl /export/servers/hadoop-2.7.1/etc/hadoop root@192.168.196.72:/export/servers/hadoop-2.7.1/etc ======
sending incremental file list
hadoop/.mapred-env.sh.swp
hadoop/core-site.xml
hadoop/hadoop-env.sh
hadoop/hdfs-site.xml
hadoop/mapred-env.sh
hadoop/mapred-site.xml
hadoop/yarn-env.sh
hadoop/yarn-site.xml

sent 19,167 bytes received 295 bytes 3,538.55 bytes/sec
total size is 91,049 speedup is 4.68
====== rsync -rvl /export/servers/hadoop-2.7.1/etc/hadoop root@192.168.196.73:/export/servers/hadoop-2.7.1/etc ======
sending incremental file list
hadoop/.mapred-env.sh.swp
hadoop/core-site.xml
hadoop/hadoop-env.sh
hadoop/hdfs-site.xml
hadoop/mapred-env.sh
hadoop/mapred-site.xml
hadoop/yarn-env.sh
hadoop/yarn-site.xml

sent 19,167 bytes received 295 bytes 2,994.15 bytes/sec
total size is 91,049 speedup is 4.68
[root@hadoop01 etc]#


#### 3. Checking the distributed files


![Checking the distributed files on hadoop02](https://img-blog.csdnimg.cn/direct/0b55e81a392c4da0a4780abc4b06f372.png)

Check the file distribution on hadoop03 the same way. `(omitted here)`




Reload the environment on the other two nodes:
[root@hadoop02 ~]# source /etc/profile
[root@hadoop03 ~]# source /etc/profile
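A quick sanity check that the environment took effect on each node is to rerun the version check, which should report Hadoop 2.7.1 just as on hadoop01 (sketch):

```bash
[root@hadoop02 ~]# hadoop version
```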



> 
> ### Cluster: single-node startup
> 
> 
> 


#### hadoop01


##### Formatting


Before formatting, check whether `logs` and `tmp` exist under the Hadoop directory (`cd /export/servers/hadoop-2.7.1`).  
 If they do, delete them first with `rm -rf tmp/ logs/`, and check all three machines!  
 Only then is the directory truly clean.
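With clean directories on all three machines, the NameNode on hadoop01 can be formatted. The standard Hadoop 2.x command is shown below (the output should contain a "successfully formatted" line):

```bash
[root@hadoop01 hadoop-2.7.1]# hdfs namenode -format
```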