Configure passwordless SSH login to the host ied:
ssh-keygen
ssh-copy-id root@ied
ssh ied
tar -zxvf jdk-8u231-linux-x64.tar.gz -C /usr/local
ll /usr/local/jdk1.8.0_231
Configure the JDK environment variables:
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_231
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
source /etc/profile
java -version
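If the JDK is on the PATH, java -version should print output along these lines (exact build string assumed for 8u231):
java version "1.8.0_231"
Java(TM) SE Runtime Environment (build 1.8.0_231-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.231-b11, mixed mode)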
tar -zxvf hadoop-3.3.4.tar.gz -C /usr/local
ll /usr/local/hadoop-3.3.4
bin directory - command scripts
etc/hadoop directory - Hadoop configuration files
lib directory - jar dependencies Hadoop needs at runtime
sbin directory - scripts for starting and stopping Hadoop
Configure the Hadoop environment variables:
vim /etc/profile
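The extracted page omits the lines added to /etc/profile here; presumably they mirror the JDK step, pointing at the install directory from the tar command above (a sketch, not the author's exact lines):
export HADOOP_HOME=/usr/local/hadoop-3.3.4
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH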
source /etc/profile
hadoop version
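If the PATH is set correctly, hadoop version should report the installed release, beginning with a line like:
Hadoop 3.3.4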
Enter the Hadoop configuration directory:
cd etc/hadoop
Edit hadoop-env.sh and add three environment variable settings (sketched below):
vim hadoop-env.sh
Reload the file so the settings take effect:
source hadoop-env.sh
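The three settings are not visible in the extracted text; a plausible sketch, reusing the paths from the steps above (an assumption, not the author's exact lines):
export JAVA_HOME=/usr/local/jdk1.8.0_231
export HADOOP_HOME=/usr/local/hadoop-3.3.4
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop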
Edit the core configuration file:
vim core-site.xml
<configuration>
    <!-- Specify the HDFS master (the name node) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ied:9000</value>
    </property>
    <!-- Specify the directory for files Hadoop generates at runtime -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/usr/local/hadoop-3.3.4/tmp</value>
    </property>
</configuration>
Note: the address hdfs://ied:9000 works only if the hostname ied resolves on this machine; otherwise you must use the IP address instead, e.g. hdfs://192.168.1.100:9000.
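A minimal /etc/hosts entry for that hostname mapping might look like this (the IP is the example address above; substitute your machine's):
192.168.1.100   ied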
vim hdfs-site.xml
<configuration>
    <!-- Name node directory -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/usr/local/hadoop-3.3.4/tmp/namenode</value>
    </property>
    <!-- Data node directory -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/usr/local/hadoop-3.3.4/tmp/datanode</value>
    </property>
    <!-- Secondary name node address -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>ied:50090</value>
    </property>
    <!-- HDFS web UI address; the default is 9870, so this may be omitted -->
    <!-- Note: in hadoop-2.x the default is 50070 -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>0.0.0.0:9870</value>
    </property>
    <!-- Replication factor; the default is 3 -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!-- Whether HDFS permissions are enforced; false disables them -->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
</configuration>
vim mapred-site.xml
<configuration>
    <!-- Use YARN as the MapReduce resource-scheduling framework -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>yarn.app.mapreduce.am.env</name>
        <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
    <property>
        <name>mapreduce.map.env</name>
        <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
    <property>
        <name>mapreduce.reduce.env</name>
        <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
</configuration>
vim yarn-site.xml
<configuration>
    <!-- Resource manager host: ied -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>ied</value>
    </property>
    <!-- Auxiliary service run by the node managers -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- Disable virtual-memory checking; leaving it on causes errors in VM environments -->
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
</configuration>
In hadoop-2.x the worker hosts are configured in the slaves file; in hadoop-3.x they go in the workers file instead:
vim workers
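The extracted text does not show the file's contents; for this single-node setup the workers file presumably lists just the one hostname used throughout (an assumption):
ied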
Format the name node:
hdfs namenode -format
Seeing the message
Storage directory /usr/local/hadoop-3.3.4/tmp/namenode has been successfully formatted
in the output indicates the name node was formatted successfully. Then start HDFS:
start-dfs.sh
start-yarn.sh
Check the running Java processes:
jps
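On this single node, jps should now list roughly the following daemons (PIDs will vary):
NameNode
DataNode
SecondaryNameNode
ResourceManager
NodeManager
Jps
The HDFS web UI should likewise be reachable at http://ied:9870, the dfs.namenode.http-address port configured above.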
start-dfs.sh and start-yarn.sh can be replaced by the single command start-all.sh.