sudo adduser username #add a user, e.g. sudo adduser hadoop
#some other useful commands
sudo passwd username #change a user's password
sudo chfn username #change a user's profile information
sudo deluser username #delete a user
su username #switch to another user (requires that user's password)
sudo vim /etc/hostname #change the hostname (takes effect after a reboot)
sudo adduser hadoop sudo #add the hadoop user to the sudo group
#
# This file MUST be edited with the 'visudo' command as root.
#
# Please consider adding local content in /etc/sudoers.d/ instead of
# directly modifying this file.
#
# See the man page for details on how to write a sudoers file.
#
Defaults        env_reset
Defaults        mail_badpass
Defaults        secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"

# Host alias specification

# User alias specification

# Cmnd alias specification

# User privilege specification
root    ALL=(ALL:ALL) ALL

# Members of the admin group may gain root privileges
%admin  ALL=(ALL) ALL

# Allow members of group sudo to execute any command
%sudo   ALL=(ALL:ALL) ALL

hadoop  ALL=(ALL:ALL) ALL

# See sudoers(5) for more information on "#include" directives:

#includedir /etc/sudoers.d
ps -e | grep ssh #check whether the ssh server is running
 3167 ?        00:00:00 sshd
sudo apt-get install openssh-server #install this if sshd is not listed above
You can use
ssh hadoop@192.168.13.227 #hadoop is the username on the slave, and the IP is the slave's IP
or
ssh 192.168.13.227
to try logging in. A successful login confirms that the two virtual machines (computers) can reach each other.
ssh-keygen -t rsa #generate an RSA key pair (press Enter to accept the defaults)
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@192.168.13.227 #copy the public key to the slave
ssh hadoop@192.168.13.227 #should now log in without a password prompt
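If ssh-copy-id is not available, the public key can be appended by hand. A minimal sketch, assuming the same slave IP as above:

cat ~/.ssh/id_rsa.pub | ssh hadoop@192.168.13.227 'mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys' #append the key to the slave's authorized_keys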
$ su hadoop #switch to the hadoop user
$ sudo tar -zxvf hadoop-2.10.1.tar.gz -C /usr/local #extract the archive
$ cd /usr/local/ #enter the directory
$ sudo mv ./hadoop-2.10.1/ ./hadoop # rename the directory to hadoop
$ echo $JAVA_HOME #Java was installed earlier; if it is set up correctly this prints the path
$ cd /usr/local/hadoop/bin #enter Hadoop's bin directory
$ ./hadoop version #show Hadoop's version; a correct install prints the version info
$ cd /usr/local
$ sudo chown -R hadoop ./hadoop # change the owning user to hadoop
$ sudo chgrp -R hadoop ./hadoop # change the owning group to hadoop
$ ll #check that both the user and group of the hadoop directory are now hadoop
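The two ownership commands above can also be combined into one:

$ sudo chown -R hadoop:hadoop ./hadoop # set owning user and group in a single step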
$ cd /usr/local/hadoop
$ mkdir input #create the directory
$ cp ./etc/hadoop/*.xml ./input # copy the configuration files into the input directory
$ ./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep ./input ./output 'dfs[a-z.]+'
$ cat ./output/* # view the results
1 dfsadmin #this is the output
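Note that the example job will fail if ./output already exists, because Hadoop never overwrites an output directory; remove it before re-running:

$ rm -r ./output # required before running the example again

The table below summarizes Hadoop's main configuration files: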
File name | Format | Description |
---|---|---|
hadoop-env.sh | Bash script | Environment variables needed to run Hadoop |
core-site.xml | Hadoop configuration XML | Settings for Hadoop Core, such as I/O settings common to HDFS and MapReduce |
hdfs-site.xml | Hadoop configuration XML | Settings for the HDFS daemons: NameNode, SecondaryNameNode, and DataNode |
mapred-site.xml | Hadoop configuration XML | Settings for the MapReduce daemons: JobTracker and TaskTracker |
masters | Plain text | List of machines (one per line) that run a SecondaryNameNode |
slaves | Plain text | List of machines (one per line) that run a DataNode and a TaskTracker |
hadoop-metrics.properties | Java properties | Properties controlling how metrics are published in Hadoop |
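All of these files live in the etc/hadoop directory of the install above and can be listed with:

$ ls /usr/local/hadoop/etc/hadoop

For pseudo-distributed mode, core-site.xml is edited first: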
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/hadoop/tmp</value>
<description>Abase for other temporary directories.</description>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>
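Here fs.defaultFS tells clients where the HDFS NameNode listens (hdfs://localhost:9000), and hadoop.tmp.dir moves Hadoop's working files out of the default location under /tmp, which is cleared on every reboot. Next, edit hdfs-site.xml: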
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/hadoop/tmp/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/hadoop/tmp/dfs/data</value>
</property>
</configuration>
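dfs.replication is set to 1 because a pseudo-distributed setup runs only a single DataNode; the NameNode and DataNode directories are placed under the hadoop.tmp.dir configured above. With the configuration in place, format the NameNode: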
$ cd /usr/local/hadoop
$ ./bin/hdfs namenode -format
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.
21/04/24 20:05:51 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = node1/127.0.1.1
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 2.10.1
STARTUP_MSG:   classpath = /usr/local/hadoop/etc/hadoop:... (long classpath omitted)
STARTUP_MSG:   build = https://github.com/apache/hadoop -r 1827467c9a56f133025f28557bfc2c562d78e816; compiled by 'centos' on 2020-09-14T13:17Z
STARTUP_MSG:   java = 1.8.0_291
************************************************************/
21/04/24 20:05:51 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
21/04/24 20:05:51 INFO namenode.NameNode: createNameNode [-format]
21/04/24 20:05:52 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Formatting using clusterid: CID-48db268d-e942-412c-b33e-724efc960870
... (FSNamesystem, BlockManager and GSet details omitted)
21/04/24 20:05:57 INFO namenode.FSImage: Allocated new BlockPoolId: BP-529316488-127.0.1.1-1619265957484
21/04/24 20:05:57 INFO common.Storage: Storage directory /usr/local/hadoop/tmp/dfs/name has been successfully formatted.
21/04/24 20:05:58 INFO namenode.FSImageFormatProtobuf: Image file /usr/local/hadoop/tmp/dfs/name/current/fsimage.ckpt_0000000000000000000 of size 325 bytes saved in 0 seconds .
21/04/24 20:05:58 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at node1/127.0.1.1
************************************************************/
$ cd /usr/local/hadoop
$ ./sbin/start-dfs.sh
hadoop@node1:/usr/local/hadoop$ ./sbin/start-dfs.sh
Java HotSpot(TM) Client VM warning: You have loaded library /usr/local/hadoop/lib/native/libhadoop.so.1.0.0 which might have disabled stack guard. The VM will try to fix the stack guard now.
It's highly recommended that you fix the library with 'execstack -c <libfile>', or link it with '-z noexecstack'.
21/04/24 20:18:11 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [localhost]
localhost: ssh: connect to host localhost port 22: Connection refused
localhost: ssh: connect to host localhost port 22: Connection refused
Starting secondary namenodes [0.0.0.0]
0.0.0.0: ssh: connect to host 0.0.0.0 port 22: Connection refused
Java HotSpot(TM) Client VM warning: You have loaded library /usr/local/hadoop/lib/native/libhadoop.so.1.0.0 which might have disabled stack guard. The VM will try to fix the stack guard now.
It's highly recommended that you fix the library with 'execstack -c <libfile>', or link it with '-z noexecstack'.
21/04/24 20:18:17 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
$ ssh localhost #this fails, which shows that openssh-server is not installed
$ sudo apt-get install openssh-server #install it
$ ps -e|grep ssh #after installing, check that the ssh server is running
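With openssh-server installed, running ./sbin/start-dfs.sh again gets past the connection errors, but it may stop with a new error: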
localhost: Error: JAVA_HOME is not set and could not be found.
hadoop@node1:/usr/local/hadoop$ echo $JAVA_HOME
/usr/lib/jdk/jdk1.8.0
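So JAVA_HOME is set in the current shell, but start-dfs.sh launches the daemons through SSH in a non-interactive shell that does not inherit it. A common fix, sketched here with the JDK path shown above, is to hard-code the path in Hadoop's own environment file:

$ vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh
# change the shipped default line
export JAVA_HOME=${JAVA_HOME}
# to the explicit JDK path
export JAVA_HOME=/usr/lib/jdk/jdk1.8.0

After this change, start-dfs.sh should get past the JAVA_HOME error, leaving only the native-library warning: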
21/04/24 20:37:02 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
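A common attempt to silence this warning is to point Hadoop at its bundled native libraries by appending the following to /etc/profile: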
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
source /etc/profile
hadoop@node1:/usr/local/hadoop$ jps
11203 DataNode
11367 SecondaryNameNode
11047 NameNode
11583 Jps
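With NameNode, DataNode and SecondaryNameNode all running, the pseudo-distributed HDFS is up; in Hadoop 2.x you can also open http://localhost:50070 in a browser to inspect its status. If the daemons fail to come up, a common recovery is to stop everything, wipe the temporary directory, and re-format: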
$ ./sbin/stop-dfs.sh # stop Hadoop
$ rm -r ./tmp # delete the tmp directory; note that this deletes all existing data in HDFS
$ ./bin/hdfs namenode -format # re-format the NameNode
$ ./sbin/start-dfs.sh # restart Hadoop
$ cd /usr/local/hadoop
$ ./bin/hdfs dfs -mkdir -p /user/hadoop # create the user directory in HDFS
$ ./bin/hdfs dfs -mkdir input #create the input directory for the hadoop user in HDFS
$ ./bin/hdfs dfs -put ./etc/hadoop/*.xml input #copy the local files into HDFS
$ ./bin/hdfs dfs -ls input #list the files
Found 8 items
-rw-r--r--   1 hadoop supergroup       8814 2021-04-24 21:21 input/capacity-scheduler.xml
-rw-r--r--   1 hadoop supergroup       1076 2021-04-24 21:21 input/core-site.xml
-rw-r--r--   1 hadoop supergroup      10206 2021-04-24 21:21 input/hadoop-policy.xml
-rw-r--r--   1 hadoop supergroup       1134 2021-04-24 21:21 input/hdfs-site.xml
-rw-r--r--   1 hadoop supergroup        620 2021-04-24 21:21 input/httpfs-site.xml
-rw-r--r--   1 hadoop supergroup       3518 2021-04-24 21:21 input/kms-acls.xml
-rw-r--r--   1 hadoop supergroup       5939 2021-04-24 21:21 input/kms-site.xml
-rw-r--r--   1 hadoop supergroup        690 2021-04-24 21:21 input/yarn-site.xml
$ ./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'dfs[a-z.]+'
....
$ ./bin/hdfs dfs -cat output/* #view the results
1 dfsadmin
1 dfs.replication
1 dfs.namenode.name.dir
1 dfs.datanode.data.dir
$ ./bin/hdfs dfs -rm -r output # delete the output directory
./sbin/stop-dfs.sh
sudo vim /etc/hosts
192.168.43.202 node1
192.168.43.106 node2
127.0.0.1 localhost
192.168.43.202 node1
192.168.43.106 node2
192.168.43.177 node3
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ping node1 -c 3 # ping only 3 times; otherwise you have to press Ctrl+C to stop
ping node2 -c 3
ping node3 -c 3
export PATH=$PATH:/usr/local/hadoop/bin:/usr/local/hadoop/sbin # add Hadoop's bin and sbin directories to PATH
vim /usr/local/hadoop/etc/hadoop/slaves # list the slave nodes, one hostname per line
node2
node3
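Because node1 is not listed in slaves, it acts purely as the master; add a node1 line to this file if you also want the master to double as a DataNode. core-site.xml now points fs.defaultFS at node1 instead of localhost: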
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://node1:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/hadoop/tmp</value>
<description>Abase for other temporary directories.</description>
</property>
</configuration>
<configuration>
        <property>
                <name>dfs.namenode.secondary.http-address</name>
                <value>node1:50090</value>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>2</value>
        </property>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/name</value>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/data</value>
        </property>
</configuration>
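dfs.replication is set to 2 to match the two DataNodes (node2 and node3), and the SecondaryNameNode web address is pinned to node1. Next, create mapred-site.xml from its template and edit it: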
cp /usr/local/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/etc/hadoop/mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>node1:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>node1:19888</value>
</property>
</configuration>
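Finally, yarn-site.xml names node1 as the ResourceManager and enables the MapReduce shuffle service on every NodeManager: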
<configuration>
        <property>
                <name>yarn.resourcemanager.hostname</name>
                <value>node1</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
        <property>
                <name>yarn.resourcemanager.address</name>
                <value>node1:18040</value>
        </property>
        <property>
                <name>yarn.resourcemanager.scheduler.address</name>
                <value>node1:18030</value>
        </property>
        <property>
                <name>yarn.resourcemanager.resource-tracker.address</name>
                <value>node1:18025</value>
        </property>
        <property>
                <name>yarn.resourcemanager.admin.address</name>
                <value>node1:18141</value>
        </property>
        <property>
                <name>yarn.resourcemanager.webapp.address</name>
                <value>node1:18088</value>
        </property>
</configuration>
cd /usr/local
sudo rm -r ./hadoop/tmp # delete the Hadoop temporary files
sudo rm -r ./hadoop/logs/* # delete the log files
tar -zcf ~/hadoop.master.tar.gz ./hadoop # compress first, then copy
cd ~
scp ./hadoop.master.tar.gz hadoop@node2:/home/hadoop #copy to slave node node2
scp ./hadoop.master.tar.gz hadoop@node3:/home/hadoop #copy to slave node node3
# then, on each slave node (node2 and node3):
sudo rm -r /usr/local/hadoop # remove the old copy (if it exists)
sudo tar -zxf ~/hadoop.master.tar.gz -C /usr/local
sudo chown -R hadoop /usr/local/hadoop #set the owner to hadoop
hdfs namenode -format # needed only on the first run, to initialize the NameNode; not needed afterwards
cd /usr/local/hadoop/sbin
./start-dfs.sh
./start-yarn.sh
./mr-jobhistory-daemon.sh start historyserver
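Running jps on each node should now show the daemons below; the ResourceManager web UI is also reachable at http://node1:18088, the address configured in yarn-site.xml above.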
hadoop@node1:/usr/local/hadoop$ jps
3072 Jps
2340 NameNode
2984 JobHistoryServer
2525 SecondaryNameNode
2702 ResourceManager
hadoop@node1:/usr/local/hadoop$
hadoop@node2:/usr/local/hadoop$ jps
2544 Jps
2273 DataNode
2376 NodeManager
hadoop@node3:/usr/local/hadoop$ jps
2172 DataNode
2271 NodeManager
2463 Jps
hadoop@node1:/usr/local/hadoop$ hdfs dfsadmin -report
Java HotSpot(TM) Client VM warning: You have loaded library /usr/local/hadoop/lib/native/libhadoop.so.1.0.0 which might have disabled stack guard. The VM will try to fix the stack guard now.
It's highly recommended that you fix the library with 'execstack -c <libfile>', or link it with '-z noexecstack'.
21/04/26 20:03:19 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Configured Capacity: 18850250752 (17.56 GB)
Present Capacity: 4380217344 (4.08 GB)
DFS Remaining: 4380168192 (4.08 GB)
DFS Used: 49152 (48 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0
Pending deletion blocks: 0

-------------------------------------------------
Live datanodes (2):

Name: 192.168.43.106:50010 (node2)
Hostname: node2
Decommission Status : Normal
Configured Capacity: 9425125376 (8.78 GB)
DFS Used: 24576 (24 KB)
Non DFS Used: 6671843328 (6.21 GB)
DFS Remaining: 2250887168 (2.10 GB)
DFS Used%: 0.00%
DFS Remaining%: 23.88%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Mon Apr 26 20:03:19 CST 2021
Last Block Report: Mon Apr 26 19:58:04 CST 2021

Name: 192.168.43.177:50010 (node3)
Hostname: node3
Decommission Status : Normal
Configured Capacity: 9425125376 (8.78 GB)
DFS Used: 24576 (24 KB)
Non DFS Used: 6793449472 (6.33 GB)
DFS Remaining: 2129281024 (1.98 GB)
DFS Used%: 0.00%
DFS Remaining%: 22.59%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Mon Apr 26 20:03:20 CST 2021
Last Block Report: Mon Apr 26 19:58:05 CST 2021
hdfs dfs -mkdir -p /user/hadoop
hdfs dfs -mkdir input
hdfs dfs -put /usr/local/hadoop/etc/hadoop/*.xml input
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'dfs[a-z.]+'
...
21/04/26 20:24:33 INFO client.RMProxy: Connecting to ResourceManager at node1/192.168.43.202:18040
21/04/26 20:24:35 INFO input.FileInputFormat: Total input files to process : 1
21/04/26 20:24:36 INFO mapreduce.JobSubmitter: number of splits:1
21/04/26 20:24:37 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1619438292942_0002
21/04/26 20:24:37 INFO impl.YarnClientImpl: Submitted application application_1619438292942_0002
21/04/26 20:24:37 INFO mapreduce.Job: The url to track the job: http://node1:18088/proxy/application_1619438292942_0002/
21/04/26 20:24:37 INFO mapreduce.Job: Running job: job_1619438292942_0002
21/04/26 20:24:57 INFO mapreduce.Job: Job job_1619438292942_0002 running in uber mode : false
21/04/26 20:24:57 INFO mapreduce.Job:  map 0% reduce 0%
21/04/26 20:25:06 INFO mapreduce.Job:  map 100% reduce 0%
21/04/26 20:25:16 INFO mapreduce.Job:  map 100% reduce 100%
21/04/26 20:25:17 INFO mapreduce.Job: Job job_1619438292942_0002 completed successfully
21/04/26 20:25:17 INFO mapreduce.Job: Counters: 49
        File System Counters
                FILE: Number of bytes read=153
                FILE: Number of bytes written=416619
                FILE: Number of read operations=0
                FILE: Number of large read operations=0
                FILE: Number of write operations=0
                HDFS: Number of bytes read=390
                HDFS: Number of bytes written=107
                HDFS: Number of read operations=7
                HDFS: Number of large read operations=0
                HDFS: Number of write operations=2
        Job Counters
                Launched map tasks=1
                Launched reduce tasks=1
                Data-local map tasks=1
                Total time spent by all maps in occupied slots (ms)=6535
                Total time spent by all reduces in occupied slots (ms)=5743
                Total time spent by all map tasks (ms)=6535
                Total time spent by all reduce tasks (ms)=5743
                Total vcore-milliseconds taken by all map tasks=6535
                Total vcore-milliseconds taken by all reduce tasks=5743
                Total megabyte-milliseconds taken by all map tasks=6691840
                Total megabyte-milliseconds taken by all reduce tasks=5880832
        Map-Reduce Framework
                Map input records=5
                Map output records=5
                Map output bytes=137
                Map output materialized bytes=153
                Input split bytes=127
                Combine input records=0
                Combine output records=0
                Reduce input groups=1
                Reduce shuffle bytes=153
                Reduce input records=5
                Reduce output records=5
                Spilled Records=10
                Shuffled Maps =1
                Failed Shuffles=0
                Merged Map outputs=1
                GC time elapsed (ms)=274
                CPU time spent (ms)=1290
                Physical memory (bytes) snapshot=254038016
                Virtual memory (bytes) snapshot=729309184
                Total committed heap usage (bytes)=137498624
        Shuffle Errors
                BAD_ID=0
                CONNECTION=0
                IO_ERROR=0
                WRONG_LENGTH=0
                WRONG_MAP=0
                WRONG_REDUCE=0
        File Input Format Counters
                Bytes Read=263
        File Output Format Counters
                Bytes Written=107
hadoop@node1:/usr/local/hadoop$ ./bin/hdfs dfs -cat output/*
Java HotSpot(TM) Client VM warning: You have loaded library /usr/local/hadoop/lib/native/libhadoop.so.1.0.0 which might have disabled stack guard. The VM will try to fix the stack guard now.
It's highly recommended that you fix the library with 'execstack -c <libfile>', or link it with '-z noexecstack'.
21/04/26 20:27:53 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
1 dfsadmin
1 dfs.replication
1 dfs.namenode.secondary.http
1 dfs.namenode.name.dir
1 dfs.datanode.data.dir
cd /usr/local/hadoop/sbin
./stop-yarn.sh
./stop-dfs.sh
./mr-jobhistory-daemon.sh stop historyserver