hadoop_version:3.2.1
spark_version:3.1.1
zeppelin_version:0.8.0
sudo apt-get install docker.io
sudo apt-get install docker-compose
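A quick sanity check that both tools are installed and the engine is running (version numbers will vary by distribution; the systemctl line assumes a systemd-based system):

docker --version
docker-compose --version
sudo systemctl status docker --no-pager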
vim docker-compose.yml
version: "3" services: namenode: image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8 container_name: namenode restart: always ports: - 9870:9870 - 9000:9000 volumes: - hadoop_namenode:/hadoop/dfs/name environment: - CLUSTER_NAME=test env_file: - ./hadoop.env datanode: image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8 container_name: datanode1 restart: always volumes: - hadoop_datanode:/hadoop/dfs/data ports: - 9864:9864 environment: SERVICE_PRECONDITION: "namenode:9870" env_file: - ./hadoop.env datanode2: image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8 container_name: datanode2 restart: always volumes: - hadoop_datanode2:/hadoop/dfs/data ports: - 9863:9864 environment: SERVICE_PRECONDITION: "namenode:9870" env_file: - ./hadoop.env resourcemanager: image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8 container_name: resourcemanager restart: always environment: SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 datanode2:9864" ports: - 8088:8088 expose: - "8032" - "8031" - "8030" env_file: - ./hadoop.env nodemanager1: image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8 container_name: nodemanager restart: always environment: SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 datanode2:9864 resourcemanager:8088" ports: - 8042:8042 env_file: - ./hadoop.env historyserver: image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8 container_name: historyserver restart: always environment: SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 datanode2:9864 resourcemanager:8088" volumes: - hadoop_historyserver:/hadoop/yarn/timeline ports: - 8188:8188 env_file: - ./hadoop.env spark-master: image: bde2020/spark-master:3.1.1-hadoop3.2 container_name: spark-master ports: - 8080:8080 - 7077:7077 env_file: - ./hadoop.env spark-worker1: image: bde2020/spark-worker:3.1.1-hadoop3.2 environment: - "SPARK_MASTER=spark://spark-master:7077" env_file: - ./hadoop.env spark-worker2: image: bde2020/spark-worker:3.1.1-hadoop3.2 environment: - "SPARK_MASTER=spark://spark-master:7077" env_file: - ./hadoop.env zeppelin: image: bde2020/zeppelin:0.8.0-hadoop-2.8.0-spark-2.3.1 ports: - 80:8080 volumes: - ./notebook:/opt/zeppelin/notebook environment: SPARK_MASTER: "spark://spark-master:7077" MASTER: "spark:spark-master:7077" env_file: - ./hadoop.env volumes: hadoop_namenode: hadoop_datanode: hadoop_datanode2: hadoop_historyserver:
vim hadoop.env
CORE_CONF_fs_defaultFS=hdfs://namenode:9000
CORE_CONF_hadoop_http_staticuser_user=root
CORE_CONF_hadoop_proxyuser_hue_hosts=*
CORE_CONF_hadoop_proxyuser_hue_groups=*
CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
HDFS_CONF_dfs_webhdfs_enabled=true
HDFS_CONF_dfs_permissions_enabled=false
HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
YARN_CONF_yarn_log___aggregation___enable=true
YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
YARN_CONF_yarn_resourcemanager_recovery_enabled=true
YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
YARN_CONF_yarn_timeline___service_enabled=true
YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
YARN_CONF_yarn_timeline___service_hostname=historyserver
YARN_CONF_mapreduce_map_output_compress=true
YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
MAPRED_CONF_mapreduce_framework_name=yarn
MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
MAPRED_CONF_mapreduce_map_memory_mb=4096
MAPRED_CONF_mapreduce_reduce_memory_mb=8192
MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
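The bde2020 images expand these variables into Hadoop's XML config files at container start: the prefix picks the target file (CORE_CONF_ → core-site.xml, HDFS_CONF_ → hdfs-site.xml, YARN_CONF_ → yarn-site.xml, MAPRED_CONF_ → mapred-site.xml), a single underscore in the property name becomes a dot, and a triple underscore becomes a dash. For example:

CORE_CONF_fs_defaultFS=hdfs://namenode:9000
  -> core-site.xml: <name>fs.defaultFS</name><value>hdfs://namenode:9000</value>
YARN_CONF_yarn_log___aggregation___enable=true
  -> yarn-site.xml: <name>yarn.log-aggregation-enable</name><value>true</value>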
sudo docker-compose up -d
sudo docker ps -a
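Containers with a SERVICE_PRECONDITION wait for their dependencies, so it can take a minute before everything shows as Up. If a container keeps restarting, its log is the first place to look:

# Follow one service's log
sudo docker-compose logs -f namenode
# Last 50 lines from every service
sudo docker-compose logs --tail=50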
sudo docker exec -it [container_name] /bin/bash
sudo docker exec -it namenode /bin/bash
# Check the Hadoop cluster status
hdfs dfsadmin -report
namenode:         http://<ip_address>:9870
datanode1:        http://<ip_address>:9864
datanode2:        http://<ip_address>:9863
resource manager: http://<ip_address>:8088
history server:   http://<ip_address>:8188
nodemanager:      http://<ip_address>:8042
spark master:     http://<ip_address>:8080
zeppelin:         http://<ip_address>:80
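Because hadoop.env enables dfs.webhdfs.enabled, the NameNode's UI port also serves the standard WebHDFS REST API, which is handy for scripted checks from the host. For example, listing the HDFS root directory returns JSON:

curl -s "http://<ip_address>:9870/webhdfs/v1/?op=LISTSTATUS"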
# Create a directory named /abc
hdfs dfs -mkdir /abc
# List the contents of the root directory
hdfs dfs -ls /
# Recursively list the contents of nested directories
hdfs dfs -ls -R /
# Upload the local /etc/hosts file into HDFS
hdfs dfs -put /etc/hosts /abc/hosts
# Append to an existing file
hdfs dfs -appendToFile /etc/hosts /abc/hosts
# Show the file's checksum
hdfs dfs -checksum /abc/hosts
# Show the size of files/directories; -h prints human-readable units
hdfs dfs -du -h /
# Download a file from HDFS to the local filesystem; ./hosts is the local destination
hdfs dfs -get /abc/hosts ./hosts
# Print the contents of a file in HDFS (text files only)
hdfs dfs -cat /abc/hosts
# Print the last 1 KB of a file
hdfs dfs -tail /abc/hosts
# Rename a file or move it to another location
hdfs dfs -mv /abc/hosts /abc/xyz
# Copy a file
hdfs dfs -cp /abc/xyz /abc/hosts
# Find files named xyz
hdfs dfs -find / -name xyz
# Delete the directory /abc; fails if it still contains files
hdfs dfs -rmdir /abc
# Delete a file
hdfs dfs -rm /abc/hosts
# Recursively delete a file/directory, even if the directory is not empty
hdfs dfs -rm -r /abc
# Show HDFS filesystem disk usage
hdfs dfs -df
# Check the Hadoop cluster status
hdfs dfsadmin -report
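To confirm the Spark side of the stack as well, a common smoke test is to submit the bundled SparkPi example from inside the spark-master container. The /spark path and the exact examples jar name below are assumptions about the bde2020 image layout; verify them with `ls /spark/examples/jars` first:

sudo docker exec -it spark-master /bin/bash
# Inside the container:
/spark/bin/spark-submit \
  --master spark://spark-master:7077 \
  --class org.apache.spark.examples.SparkPi \
  /spark/examples/jars/spark-examples_2.12-3.1.1.jar 100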