
Quickly Deploying a Hadoop 3.2.1 Cluster in Docker

Single-machine deployment of Hadoop 3.2.1 (plus Spark and Zeppelin) with docker-compose

Environment

Ubuntu 20.04 x64
Component versions:
hadoop_version: 3.2.1

spark_version: 3.1.1

zeppelin_version: 0.8.0
Installation steps

Install Docker (on Ubuntu the package is named docker.io):
sudo apt-get install docker.io
Install docker-compose:
sudo apt-get install docker-compose
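To confirm both tools are installed before continuing, a quick version check is enough (a minimal sanity check, nothing cluster-specific):

# Both commands should print a version string
docker --version
docker-compose --version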
Create docker-compose.yml:
vim docker-compose.yml
version: "3"
services:
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    restart: always
    ports:
      - 9870:9870
      - 9000:9000   
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop.env

  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode1
    restart: always
    volumes:
      - hadoop_datanode:/hadoop/dfs/data
    ports:
      - 9864:9864
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    env_file:
      - ./hadoop.env

  datanode2:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode2
    restart: always
    volumes:
      - hadoop_datanode2:/hadoop/dfs/data
    ports:
      - 9863:9864
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    env_file:
      - ./hadoop.env

  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: resourcemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 datanode2:9864"
    ports:
      - 8088:8088
    expose:
      - "8032"
      - "8031"
      - "8030"
    env_file:
      - ./hadoop.env

  nodemanager1:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    container_name: nodemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 datanode2:9864 resourcemanager:8088"
    ports:
      - 8042:8042
    env_file:
      - ./hadoop.env
  
  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: historyserver
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 datanode2:9864 resourcemanager:8088"
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    ports:
      - 8188:8188
    env_file:
      - ./hadoop.env

  spark-master:
    image: bde2020/spark-master:3.1.1-hadoop3.2
    container_name: spark-master
    ports:
      - 8080:8080
      - 7077:7077
    env_file:
      - ./hadoop.env

  spark-worker1:
    image: bde2020/spark-worker:3.1.1-hadoop3.2
    environment:
      - "SPARK_MASTER=spark://spark-master:7077"
    env_file:
      - ./hadoop.env
  
  spark-worker2:
    image: bde2020/spark-worker:3.1.1-hadoop3.2
    environment:
      - "SPARK_MASTER=spark://spark-master:7077"
    env_file:
      - ./hadoop.env
 
  zeppelin:
    image: bde2020/zeppelin:0.8.0-hadoop-2.8.0-spark-2.3.1
    ports:
      - 80:8080
    volumes:
      - ./notebook:/opt/zeppelin/notebook
    environment:
      SPARK_MASTER: "spark://spark-master:7077"
      MASTER: "spark://spark-master:7077"
    env_file:
      - ./hadoop.env
volumes:
  hadoop_namenode:
  hadoop_datanode:
  hadoop_datanode2:
  hadoop_historyserver:
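Before bringing anything up, it is worth letting docker-compose parse the file. Run the following in the directory that contains docker-compose.yml; with -q it prints nothing when the file is valid and an error message otherwise:

# Validate docker-compose.yml without printing the rendered configuration
sudo docker-compose config -q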
Create hadoop.env:
vim hadoop.env
CORE_CONF_fs_defaultFS=hdfs://namenode:9000
CORE_CONF_hadoop_http_staticuser_user=root
CORE_CONF_hadoop_proxyuser_hue_hosts=*
CORE_CONF_hadoop_proxyuser_hue_groups=*
CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec

HDFS_CONF_dfs_webhdfs_enabled=true
HDFS_CONF_dfs_permissions_enabled=false
HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false

YARN_CONF_yarn_log___aggregation___enable=true
YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
YARN_CONF_yarn_resourcemanager_recovery_enabled=true
YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
YARN_CONF_yarn_timeline___service_enabled=true
YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
YARN_CONF_yarn_timeline___service_hostname=historyserver
YARN_CONF_mapreduce_map_output_compress=true
YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle

MAPRED_CONF_mapreduce_framework_name=yarn
MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
MAPRED_CONF_mapreduce_map_memory_mb=4096
MAPRED_CONF_mapreduce_reduce_memory_mb=8192
MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
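Each entry in hadoop.env follows the naming convention used by the bde2020 images (as described in the big-data-europe/docker-hadoop documentation): the prefix selects the target file (CORE_CONF_ for core-site.xml, HDFS_CONF_ for hdfs-site.xml, YARN_CONF_ for yarn-site.xml, MAPRED_CONF_ for mapred-site.xml), and in the remainder single underscores become dots while triple underscores become dashes, so CORE_CONF_fs_defaultFS becomes the fs.defaultFS property. Once the cluster is running, you can confirm that a value was picked up, for example from inside the namenode container:

# Ask HDFS which value ended up in the generated configuration
sudo docker exec namenode hdfs getconf -confKey fs.defaultFS
# expected output: hdfs://namenode:9000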
Start the cluster:
sudo docker-compose up -d
Check that the containers are running:
sudo docker ps -a
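If a container keeps restarting, its log usually explains why; you can follow the log of any service defined in the compose file, for example the namenode:

# Follow the namenode startup log (replace namenode with any other service name)
sudo docker-compose logs -f namenode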
To open a shell inside the namenode or a datanode:
sudo docker exec -it [container_name] /bin/bash
For example, to inspect the namenode:
sudo docker exec -it namenode /bin/bash
# Check the status of the Hadoop cluster
hdfs dfsadmin -report
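To verify that YARN can actually run jobs, you can also submit the bundled pi example from inside the namenode container. The jar path below assumes the standard Hadoop layout under /opt/hadoop-3.2.1 that hadoop.env already references; adjust it if your image places Hadoop elsewhere:

# Submit a small MapReduce job (2 map tasks, 10 samples each) as a smoke test
yarn jar /opt/hadoop-3.2.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.1.jar pi 2 10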
Service web UI ports
namenode:			http://<ip_address>:9870

datanode1:			http://<ip_address>:9864

datanode2:			http://<ip_address>:9863

resource manager:	http://<ip_address>:8088

history server:		http://<ip_address>:8188

nodemanager:		http://<ip_address>:8042

spark master:		http://<ip_address>:8080

zeppelin:			http://<ip_address>:80
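Because dfs.webhdfs.enabled is set to true in hadoop.env, the namenode HTTP port also serves the WebHDFS REST API, so HDFS can be checked from the host without entering any container:

# List the HDFS root directory over WebHDFS (9870 is the namenode HTTP port mapped above)
curl "http://<ip_address>:9870/webhdfs/v1/?op=LISTSTATUS"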


Common HDFS commands

# Create a directory named /abc
hdfs dfs -mkdir /abc

# List the contents of the root directory
hdfs dfs -ls /

# Recursively list the contents of nested directories
hdfs dfs -ls -R /

# Upload the local /etc/hosts file to HDFS
hdfs dfs -put /etc/hosts /abc/hosts

# Append content to an existing file
hdfs dfs -appendToFile /etc/hosts /abc/hosts

# Show the checksum of a file
hdfs dfs -checksum /abc/hosts

# Show the size of files/directories; -h prints human-readable sizes (with units)
hdfs dfs -du -h /

# Download a file from HDFS to the local filesystem; ./hosts is the local destination
hdfs dfs -get /abc/hosts ./hosts

# Print the contents of a text file in HDFS (only works for plain-text files)
hdfs dfs -cat /abc/hosts

# Print the last 1 KB of a file
hdfs dfs -tail /abc/hosts

# Rename a file or move it to another location
hdfs dfs -mv /abc/hosts /abc/xyz

# Copy a file
hdfs dfs -cp /abc/xyz /abc/hosts

# Find a file named xyz
hdfs dfs -find / -name xyz

# Delete the directory /abc (fails if it still contains files)
hdfs dfs -rmdir /abc

# Delete a file
hdfs dfs -rm /abc/hosts

# Recursively delete a file/directory, even if the directory is not empty
hdfs dfs -rm -r /abc

# Show HDFS filesystem disk usage
hdfs dfs -df

# Check the status of the Hadoop cluster
hdfs dfsadmin -report
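Put together, a short session run inside the namenode container that exercises several of these commands might look like this:

# Create a directory, upload a file, inspect it, then clean up
hdfs dfs -mkdir /abc
hdfs dfs -put /etc/hosts /abc/hosts
hdfs dfs -ls /abc
hdfs dfs -cat /abc/hosts
hdfs dfs -rm -r /abc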