Hadoop 集群安装笔记:配置 ~/.ssh/authorized_keys 免密登录,并在本机 hosts 文件中添加集群主机名称和 IP
赞
踩
# Show / set the machine's host name.
# NOTE: the original used C-style `//` annotations, which shell would treat
# as extra arguments — replaced with proper `#` comments.
hostname                          # print the current host name
hostnamectl set-hostname 主机名    # set the host name (replace 主机名 with the real name)
添加各个机器的地址和主机名:vi /etc/hosts
# Generate an RSA key pair with an empty passphrase (for password-less SSH).
ssh-keygen -t rsa -P ""
# Append the public key to authorized_keys.
# Fixes: use `>>` (append) instead of `>` (the original would overwrite any
# existing keys), and an absolute `~/.ssh/...` path so it works from any cwd.
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# Copy the key to the remote host (answer "yes", then enter the password).
ssh-copy-id -i ~/.ssh/id_rsa.pub root@hadoop100
Hadoop安装包:hadoop-2.6.0-cdh5.14.2.tar.gz
Hadoop-native安装包:hadoop-native-64-2.6.0.tar
# Unpack the Hadoop distribution, rename it for convenience, remove the archive.
tar -zxvf hadoop-2.6.0-cdh5.14.2.tar.gz
mv hadoop-2.6.0-cdh5.14.2 hadoop   # shorter name is easier to work with
rm -f hadoop-2.6.0-cdh5.14.2.tar.gz
进入目录 → /opt/software/hadoop/etc/hadoop
# The java implementation to use.
# Fix: the original line ended with `//jdk的安装地址`, which shell passes to
# `export` as an extra (invalid-identifier) argument and fails — use `#`.
export JAVA_HOME=/opt/software/jdk1.8.0_221   # JDK install path
<!-- core-site.xml — fixes vs. original: closing tag was misspelled
     `</confoguration>`; stray `//` annotations (invalid in XML) moved into
     XML comments; proxyuser groups value was empty, which blocks all groups —
     `*` matches the `*` used for hosts (typical tutorial setting). -->
<configuration>
  <property>
    <!-- NameNode RPC endpoint: host name (or IP) of the NameNode machine -->
    <name>fs.defaultFS</name>
    <value>hdfs://hadoop100:9000</value>
  </property>
  <property>
    <!-- base directory for Hadoop temporary files -->
    <name>hadoop.tmp.dir</name>
    <value>/root/software/hadoop/tmp</value>
  </property>
  <property>
    <!-- hosts from which user `root` may act as a proxy -->
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
  </property>
  <property>
    <!-- groups whose members `root` may impersonate -->
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
  </property>
</configuration>
<!-- hdfs-site.xml (pseudo-distributed) — fixes vs. original:
     property name was misspelled `dfc.replication`; closing tag was
     misspelled `</confoguration>`; stray `//` annotation moved into a comment. -->
<configuration>
  <property>
    <!-- replication factor: omit for local mode, 1 for pseudo-distributed,
         N = number of DataNodes for a fully distributed cluster -->
    <name>dfs.replication</name>
    <value>1</value>
  </property>
</configuration>
<!-- mapred-site.xml — fix vs. original: closing tag was misspelled
     `</confoguration>`. -->
<configuration>
  <property>
    <!-- run MapReduce jobs on YARN -->
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
修改完改名:mv mapred-site.xml.template mapred-site.xml
<!-- yarn-site.xml — fixes vs. original: opening tag was missing `<`;
     property names were misspelled `yan.nademanager...` (should be
     `yarn.nodemanager...`); the shuffle class key must embed the aux-service
     name: `...aux-services.mapreduce_shuffle.class` (not `.mapreduce.shuffle.class`);
     closing tag was misspelled `</confoguration>`. -->
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <!-- host name of the ResourceManager machine (this machine) -->
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop100</value>
  </property>
</configuration>
# Hadoop environment (append to /etc/profile, then `source /etc/profile`).
# Fix: the original annotated HADOOP_HOME with `//hadoop安装地址`, which shell
# passes to `export` as an extra invalid argument — replaced with a `#` comment.
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin
export HADOOP_HOME=/root/software/hadoop   # Hadoop install directory
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
# NOTE(review): $JAVA_HOME is referenced below but never exported in this
# snippet — ensure it is set (e.g. /opt/software/jdk1.8.0_221) before this line.
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
# Format HDFS and start all daemons (single-node setup).
# Fixes: `//` annotations replaced with `#` comments; the bare log-file path on
# the last line would have been executed as a command — made it a comment.
hdfs namenode -format   # format the NameNode (first run only)
start-all.sh            # start HDFS and YARN daemons
jps                     # should list 6 Java processes if startup succeeded
# If DataNode is missing, inspect its log:
#   logs/hadoop-root-DataNode-hadoop100.log
上面已经安装好单机的Hadoop
搭建集群之前各个机器之间增加互信,可以免密登录
# Copy the Hadoop install and profile to the other cluster node.
scp -r /opt/software/hadoop root@hadoop101:/opt/software/
# After copying, run `source /etc/profile` on the new machine.
scp /etc/profile root@hadoop101:/etc/
<!-- hdfs-site.xml (fully distributed) — fix vs. original: the stray
     `//选择secondary node` text after the value is invalid in XML and was
     moved into a proper XML comment. -->
<configuration>
  <property>
    <!-- replication factor for the 3-node cluster -->
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <!-- host that runs the SecondaryNameNode -->
    <name>dfs.namenode.secondary.http-address</name>
    <value>hadoop101:50090</value>
  </property>
</configuration>
hadoop100
hadoop101
hadoop102
# Re-format and start the cluster.
# Fix: `//` annotation replaced with a `#` comment (shell would have passed it
# as extra arguments to the command).
# Before formatting, delete any existing tmp/ and logs/ directories under the
# software/hadoop directory first.
hdfs namenode -format
start-all.sh
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。