vim /etc/hosts
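The cluster in this walkthrough uses three nodes named master, slave1 and slave2; map each hostname to its IP address here. A minimal sketch (the 192.168.1.x addresses are placeholders for your actual IPs):
192.168.1.101 master
192.168.1.102 slave1
192.168.1.103 slave2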
export JAVA_HOME=/usr/local/jdk1.8.0_231
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
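Assuming the three JDK lines above were added to /etc/profile (the same file edited below for Hadoop), source it and verify the JDK; with jdk1.8.0_231 installed the check looks like:
source /etc/profile
java -version    # should report java version "1.8.0_231"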
tar -zxvf hadoop-3.3.4.tar.gz -C /usr/local
vim /etc/profile
export HADOOP_HOME=/usr/local/hadoop-3.3.4
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
source /etc/profile
This makes the configuration take effect. Then check the Hadoop version:
hadoop version
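If the PATH is set correctly, the first line of the output should report the release, e.g.:
Hadoop 3.3.4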
cd $HADOOP_HOME/etc/hadoop
vim hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_231
export HADOOP_HOME=/usr/local/hadoop-3.3.4
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
source hadoop-env.sh
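Sourcing the file makes these variables take effect in the current shell. As a quick sanity check (the expected path assumes the install locations above):
echo $HADOOP_CONF_DIR    # should print /usr/local/hadoop-3.3.4/etc/hadoop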
Next, edit the core configuration file:
vim core-site.xml
<configuration>
<!--Specify the HDFS master (the NameNode address)-->
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<!--Directory where Hadoop stores files generated at runtime-->
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop-3.3.4/tmp</value>
</property>
</configuration>
vim hdfs-site.xml
<configuration>
<!--Directory for the NameNode-->
<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/local/hadoop-3.3.4/tmp/namenode</value>
</property>
<!--Directory for the DataNode-->
<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/local/hadoop-3.3.4/tmp/datanode</value>
</property>
<!--Secondary NameNode address-->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>master:50090</value>
</property>
<!--HDFS web UI address; defaults to 9870, so this can be omitted-->
<!--Note: on Hadoop 2.x the default is 50070-->
<property>
<name>dfs.namenode.http-address</name>
<value>0.0.0.0:9870</value>
</property>
<!--Replication factor; defaults to 3-->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!--Whether HDFS permissions are enforced; false disables them-->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
</configuration>
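The tmp, namenode and datanode directories referenced above can be created up front; Hadoop also creates them during format and startup, so this is only a safeguard:
mkdir -p /usr/local/hadoop-3.3.4/tmp/namenode /usr/local/hadoop-3.3.4/tmp/datanode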
vim mapred-site.xml
<configuration>
<!--Use YARN as the MapReduce resource scheduling framework-->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
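On some Hadoop 3.x deployments, MapReduce jobs also need HADOOP_MAPRED_HOME made visible to the ApplicationMaster and tasks; if a job later fails with a missing MRAppMaster class, adding these properties inside the <configuration> element of mapred-site.xml usually resolves it:
<property>
<name>yarn.app.mapreduce.am.env</name>
<value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.3.4</value>
</property>
<property>
<name>mapreduce.map.env</name>
<value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.3.4</value>
</property>
<property>
<name>mapreduce.reduce.env</name>
<value>HADOOP_MAPRED_HOME=/usr/local/hadoop-3.3.4</value>
</property>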
vim yarn-site.xml
<configuration>
<!--Set the ResourceManager host: master-->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>
<!--Auxiliary service that the NodeManager runs-->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!--Disable virtual-memory checking; without this setting, jobs error out in virtual-machine environments-->
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
</configuration>
vim workers
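The workers file lists the hosts that run the DataNode and NodeManager daemons, one hostname per line. Assuming the master also doubles as a worker (which matches the replication factor of 3 configured above):
master
slave1
slave2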
scp -r $JAVA_HOME root@slave1:$JAVA_HOME
(Note: when copying a directory, the -r option is required.)
scp -r $HADOOP_HOME root@slave1:$HADOOP_HOME
scp /etc/profile root@slave1:/etc/profile
On the slave1 node, make the copied profile take effect and verify both installations:
source /etc/profile
java -version
hadoop version
scp /etc/hosts root@slave1:/etc/hosts
scp -r $JAVA_HOME root@slave2:$JAVA_HOME
(Note: when copying a directory, the -r option is required.)
scp -r $HADOOP_HOME root@slave2:$HADOOP_HOME
scp /etc/profile root@slave2:/etc/profile
On the slave2 node, make the copied profile take effect and verify both installations:
source /etc/profile
java -version
hadoop version
scp /etc/hosts root@slave2:/etc/hosts
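One prerequisite this walkthrough assumes: start-all.sh drives all nodes over SSH, so the master needs passwordless SSH access to every node, including itself. If that is not yet in place, something like the following sets it up:
ssh-keygen -t rsa      # press Enter through the prompts
ssh-copy-id root@master
ssh-copy-id root@slave1
ssh-copy-id root@slave2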
hdfs namenode -format
If the format succeeds, the log contains a line like:
common.Storage: Storage directory /usr/local/hadoop-3.3.4/tmp/namenode has been successfully formatted.
which indicates the NameNode was formatted successfully. Start all of the Hadoop daemons:
start-all.sh
jps
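On the master, jps should list roughly the following daemons (with the workers file sketched above, the master also runs a DataNode and NodeManager):
NameNode
SecondaryNameNode
DataNode
ResourceManager
NodeManager
Jps
The HDFS web UI should then be reachable at http://master:9870, the port configured in hdfs-site.xml.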
Two pitfalls were hit during this setup: hadoop-env.sh had not been re-sourced, so the HADOOP_CONF_DIR environment variable was unset and core-site.xml could not be found; and fs.defaultFS had been misspelled as fs.defaultFs, so after starting Hadoop the three HDFS processes (NameNode, SecondaryNameNode, DataNode) did not appear.
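A quick way to catch the misspelling described above before restarting (property names are case-sensitive):
grep -n defaultF $HADOOP_HOME/etc/hadoop/core-site.xml    # should show fs.defaultFS with a capital S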
With the cluster running, exercise a few basic HDFS shell commands. List the root directory:
hdfs dfs -ls /
hdfs dfs -mkdir /BigData
Use the -p option to create a multi-level directory in one command:
hdfs dfs -mkdir -p /china/sichuan/luzhou/lzy
hdfs dfs -ls -R /china
The -R (recursive) option lists everything under the directory. Next, upload a local file to HDFS:
hdfs dfs -put /etc/hosts /BigData
hdfs dfs -D dfs.replication=2 -put /etc/profile /BigData
(the -D option sets the replication factor at upload time; setrep changes it for an existing file)
hdfs dfs -setrep 3 /BigData/profile
hdfs dfs -cat /BigData/hosts
hdfs dfs -rm /BigData/hosts
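Listing the directory again confirms the delete; only the profile file uploaded earlier should remain:
hdfs dfs -ls /BigData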