赞
踩
# Stop firewalld for the current session and keep it from starting on boot
# (CentOS 7), so Hadoop's RPC and web-UI ports are reachable.
# NOTE(review): acceptable for a lab/teaching VM; on a real cluster prefer
# opening the specific ports instead of disabling the firewall.
systemctl stop firewalld.service
systemctl disable firewalld.service
查看selinux状态,如果为disabled说明已经关闭,
如果为enforcing,需要将enforcing设置为disabled
vim /etc/sysconfig/selinux
将SELINUX=enforcing 修改为SELINUX=disabled
设置完成之后,需要重启生效,重启后查看selinux状态,为disabled说明设置成功
[root@hadoop01 opt]# sestatus
SELinux status: disabled
[root@hadoop01 opt]#
1、卸载自带的openjdk
[root@hadoop01 ~]# java -version openjdk version "1.8.0_161" OpenJDK Runtime Environment (build 1.8.0_161-b14) OpenJDK 64-Bit Server VM (build 25.161-b14, mixed mode) [root@hadoop01 ~]# rpm -qa | grep java python-javapackages-3.4.1-11.el7.noarch java-1.7.0-openjdk-headless-1.7.0.171-2.6.13.2.el7.x86_64 java-1.7.0-openjdk-1.7.0.171-2.6.13.2.el7.x86_64 java-1.8.0-openjdk-headless-1.8.0.161-2.b14.el7.x86_64 java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64 tzdata-java-2018c-1.el7.noarch javapackages-tools-3.4.1-11.el7.noarch [root@hadoop01 ~]# [root@hadoop01 ~]# rpm -qa | grep jdk java-1.7.0-openjdk-headless-1.7.0.171-2.6.13.2.el7.x86_64 java-1.7.0-openjdk-1.7.0.171-2.6.13.2.el7.x86_64 java-1.8.0-openjdk-headless-1.8.0.161-2.b14.el7.x86_64 java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64 copy-jdk-configs-3.3-2.el7.noarch [root@hadoop01 ~]# rpm -e --nodeps java-1.7.0-openjdk-1.7.0.171-2.6.13.2.el7.x86_64 [root@hadoop01 ~]# rpm -e --nodeps java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64 [root@hadoop01 ~]# rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.161-2.b14.el7.x86_64 [root@hadoop01 ~]# rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.171-2.6.13.2.el7.x86_64 [root@hadoop01 ~]# java -version -bash: /bin/java: No such file or directory
2、上传jdk安装包,使用root用户安装
# Create the JDK install root and unpack the JDK 8u192 tarball into it
# (run as root; the tarball must already be in the current directory).
mkdir -p /usr/java
tar -zxvf jdk-8u192-linux-x64.tar.gz -C /usr/java
3、配置环境变量
vim /etc/profile
# System-wide Java environment, appended to /etc/profile.
export JAVA_HOME=/usr/java/jdk1.8.0_192
# dt.jar/tools.jar on CLASSPATH is a JDK <= 8 convention; harmless here.
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
4、查看是否安装成功
配置好环境变量之后,执行source /etc/profile ,使环境变量生效,查看java -version
vim /etc/hostname
hadoop01
vim /etc/hosts
192.168.110.1 hadoop01
# Directory that will hold the Hadoop installation; hand ownership to the
# non-root 'hadoop' user so the remaining steps need not run as root.
mkdir /opt/module
chown hadoop:hadoop /opt/module
[hadoop@hadoop01 opt]$ tar -zxvf hadoop-2.7.3.tar.gz -C /opt/module/
## 3、配置core-site.xml
vim core-site.xml
<configuration>
<!-- Default filesystem URI: the NameNode RPC endpoint (host:port). -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop01:9000</value>
</property>
<!-- Base directory for Hadoop's temporary/working files; moved off /tmp
     so data survives reboots. Must match the mkdir step later on. -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.7.3/data/tmpdata</value>
</property>
</configuration>
vim hdfs-site.xml
<!-- hdfs-site.xml properties (reformatted; values unchanged). -->
<property>
    <name>dfs.namenode.name.dir</name>
    <value>/opt/module/hadoop-2.7.3/data/namenode</value>
    <description>为了保证元数据的安全一般配置多个不同目录</description>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>/opt/module/hadoop-2.7.3/data/datanode</value>
    <description>datanode 的数据存储目录</description>
</property>
<property>
    <name>dfs.replication</name>
    <value>1</value>
    <description>HDFS 的数据块的副本存储个数, 默认是3</description>
</property>
[hadoop@hadoop01 hadoop]$ cp mapred-site.xml.template mapred-site.xml
[hadoop@hadoop01 hadoop]$
[hadoop@hadoop01 hadoop]$ vim mapred-site.xml
<!-- Run MapReduce jobs on YARN instead of the default local runner. -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
[hadoop@hadoop01 hadoop]$ vim yarn-site.xml
<!-- Site specific YARN configuration properties -->
<!-- NOTE(review): yarn.nodemanager.aux-services belongs in yarn-site.xml,
     not mapred-site.xml — the template comment above is the yarn-site.xml
     header; the tutorial omits the "vim yarn-site.xml" step. -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- YARN 集群为 MapReduce 程序提供的 shuffle 服务 -->
vim slaves
hadoop01
因为是伪分布式,所以只填入hadoop01一台机器的主机名
8.1 由于是用hadoop用户登陆的,环境变量是~/.bashrc
vim ~/.bashrc
# HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-2.7.3
# bin/ holds user commands (hadoop, hdfs); sbin/ holds daemon scripts
# (start-dfs.sh, start-yarn.sh) — both are needed on PATH below.
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# JAVA_HOME (repeated here because ~/.bashrc is what the hadoop user loads)
export JAVA_HOME=/usr/java/jdk1.8.0_192
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
8.2 使配置生效
[hadoop@hadoop01 ~]$ source ~/.bashrc
8.3 查看hadoop版本:hadoop version
[hadoop@hadoop01 ~]$ hadoop version
Hadoop 2.7.3
Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git -r baa91f7c6bc9cb92be5982de4719c1c8af91ccff
Compiled by root on 2016-08-18T01:41Z
Compiled with protoc 2.5.0
From source with checksum 2e4ce5f957ea4db193bce3734ff29ff4
This command was run using /opt/module/hadoop-2.7.3/share/hadoop/common/hadoop-common-2.7.3.jar
[hadoop@hadoop01 ~]$ mkdir -p /opt/module/hadoop-2.7.3/data/namenode
[hadoop@hadoop01 ~]$ mkdir -p /opt/module/hadoop-2.7.3/data/datanode
[hadoop@hadoop01 ~]$
[hadoop@hadoop01 ~]$ mkdir -p /opt/module/hadoop-2.7.3/data/tmpdata
[hadoop@hadoop01 ~]$ hdfs namenode -format
（提示：hadoop namenode -format 在 Hadoop 2.x 中已标记为过时，推荐使用 hdfs namenode -format，二者效果相同。）
格式化日志末尾出现 Exiting with status 0 即为初始化成功。
[hadoop@hadoop01 ~]$ cd /opt/module/hadoop-2.7.3/
[hadoop@hadoop01 hadoop-2.7.3]$ sbin/start-dfs.sh
[hadoop@hadoop01 hadoop-2.7.3]$ sbin/start-yarn.sh
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。