赞
踩
https://www.yuque.com/docs/share/5ccf027d-7944-42ee-a3e1-4f479ae662cb?# 《安装问题锦集》
# The image download is fairly large; make sure you have a stable network connection.
# "--mirror Aliyun" tells the convenience script to use the Aliyun (Alibaba Cloud) mirror.
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
vmwaretools安装可以实现复制:
https://blog.csdn.net/blessed_sy/article/details/81173746
更换下载源:
https://blog.csdn.net/perfect1t/article/details/81006947
# Edit the apt package source list.
# FIX: the file is /etc/apt/sources.list — the original said "source.list",
# which does not exist on Ubuntu.
sudo gedit /etc/apt/sources.list
# Refresh the package index from the sources configured above.
# (FIX: the original had bare prose labels on their own lines, which are not
# valid shell; they are converted to comments here.)
sudo apt-get update
# Install Docker via the convenience script; --mirror Aliyun uses the Aliyun mirror.
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
ssh安装:
https://www.cnblogs.com/asyang1/p/9467646.html
ubuntu安装ssh无法获得锁
一共有三个文件,只要输入如下的命令
# WARNING(review): removing dpkg/apt lock files is only safe when no other
# apt/dpkg process is actually running — otherwise this can corrupt the
# package database. Check first with: ps aux | grep -E 'apt|dpkg'
rm /var/lib/dpkg/lock
rm /var/lib/dpkg/lock-frontend
rm /var/cache/apt/archives/lock
即可解除占用
# 下载安装包, 拖动安装即可
https://hub.docker.com/editions/community/docker-ce-desktop-mac/
# win10家庭版 【参考】
https://docs.docker.com/docker-for-windows/install-windows-home/
# win10专业版、商业版或教育版 【参考】
https://docs.docker.com/docker-for-windows/install/
# Pull the CentOS 7 base image from Docker Hub.
docker pull centos:7
# Set the root user's password
sudo passwd
# Switch to the root user:
su root
# List local images
docker images
# (The next three lines are sample `docker images` output, not commands.)
REPOSITORY TAG IMAGE ID CREATED SIZE
tomcat latest 040bdb29ab37 6 weeks ago 649MB
centos 7 8652b9f0cb4c 3 months ago 204MB
# Delete an image
docker rmi centos:7
# Create and start the container.
#   run            : create a container from an image and start it
#   -i / -t / -d   : interactive, allocate a TTY, run detached
#   --privileged   : extended privileges; without it, starting services
#                    (systemd) inside the container fails
#   --name         : container name
#   -h             : container hostname
#   -p             : port mapping, host(Ubuntu)-port:container-port
#   centos:7       : image name
#   /usr/sbin/init : init process, used together with --privileged
docker run -itd --privileged --name singleNode -h singleNode \
  -p 2222:22 \
  -p 3306:3306 \
  -p 50070:50070 \
  -p 8088:8088 \
  -p 8080:8080 \
  -p 10000:10000 \
  -p 60010:60010 \
  -p 9092:9092 \
  centos:7 /usr/sbin/init

# Stop the container
docker stop singleNode
# Start the container
docker start singleNode
# List containers (-a: include stopped containers)
docker ps -a
# Delete a container.
# FIX: the original said `docker rmi`, which removes *images*;
# removing a container is `docker rm <container>`.
docker rm singleNode
# Start the container (if stopped) and open an interactive shell inside it.
docker start singleNode
docker exec -it singleNode /bin/bash
#使用mobax直接连容器 可以直接拖文件上传
改端口2222
在容器里重新设置密码输入:passwd
再重新连接
# Refresh yum metadata and install basic tooling inside the container.
yum clean all
# NOTE(review): "bashname" does not look like a real package name — likely a
# typo (perhaps "bash-completion" or "net-tools"); confirm the intended package.
yum -y install unzip bzip2-devel vim bashname
yum install -y openssh openssh-server openssh-clients openssl openssl-devel

# Generate an RSA key pair with an empty passphrase.
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
# Passwordless login, method 1: append your own public key to authorized_keys.
# (FIX: the original fused the prose label onto the command line, which is
# not valid shell.)
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# Start the SSH service
systemctl start sshd
# Passwordless login, method 2: use ssh-copy-id (e.g. `ssh-copy-id root@singleNode`).

# Set the container's timezone to Asia/Shanghai.
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Stop and disable the firewall so the mapped service ports are reachable.
systemctl stop firewalld
systemctl disable firewalld
# Copy the MySQL RPM bundle from the host into the container.
# FIX(review): the original copied a truncated 5.6.46 *client* file name into
# "singleNode:/software", yet the following steps cd into /opt/software and
# unpack a 5.5.40 bundle. Copy the bundle tar that is actually used, into the
# directory that is actually used — confirm the host-side path/filename.
sudo docker cp /home/b/文档/MySQL-5.5.40-1.linux2.6.x86_64.rpm-bundle.tar singleNode:/opt/software
cd /opt/software/
tar xvf MySQL-5.5.40-1.linux2.6.x86_64.rpm-bundle.tar
# Dependencies required by the MySQL server RPM.
yum -y install libaio perl
rpm -ivh MySQL-server-5.5.40-1.linux2.6.x86_64.rpm
rpm -ivh MySQL-client-5.5.40-1.linux2.6.x86_64.rpm
systemctl start mysql
# Set the root password to 'root', then log in.
/usr/bin/mysqladmin -u root password 'root'
mysql -uroot -proot
# Inside the mysql shell: allow root to connect from any host, then drop
# all other host entries and flush.
> update mysql.user set host='%' where host='localhost';
> delete from mysql.user where host<>'%' or user='';
> flush privileges;
# Unpack the JDK and create a version-independent symlink.
tar zxvf /opt/software/jdk-8u171-linux-x64.tar.gz -C /opt/install/
ln -s /opt/install/jdk1.8.0_171 /opt/install/java
# Add JAVA_HOME to the environment; the lines between the dashes are the
# content to append to /etc/profile.
vi /etc/profile
-------------------------------------------
export JAVA_HOME=/opt/install/java
export PATH=$JAVA_HOME/bin:$PATH
-------------------------------------------
Docker容器中解决重启配置不生效(source /etc/profile)
问题描述:docker在使用过程中,有时候自定义容器实例中的某些配置文件,例如:/etc/profile,但是修改后,每一次启动都需要手动 source 一遍令它重新生效,这很繁琐。
解决方法:在容器实例中 ~/.bashrc 配置文件末尾添加 source /etc/profile,保存退出即可。这样就能够实现重启后自动刷新配置文件,而无需手动操作。
# Verify the JDK installation.
java -version
# Unpack Hadoop and create a version-independent symlink.
# FIX: the original extracted to /opt/, but the symlink below expects the
# distribution under /opt/install/.
tar zxvf /opt/hadoop-2.6.0-cdh5.14.2.tar_2.gz -C /opt/install/
ln -s /opt/install/hadoop-2.6.0-cdh5.14.2 /opt/install/hadoop
# Enter the configuration directory (FIX: via the /opt/install/hadoop symlink;
# the original's /opt/hadoop does not exist).
cd /opt/install/hadoop/etc/hadoop/
# core-site.xml: HDFS default filesystem URI and Hadoop temp/data directory.
# The lines between the dashes are the file content.
vi core-site.xml
-------------------------------------------
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://singleNode:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/install/hadoop/data/tmp</value>
</property>
</configuration>
-------------------------------------------
# hdfs-site.xml: single-node cluster, so keep one replica per block.
vi hdfs-site.xml
-------------------------------------------
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
-------------------------------------------
vi mapred-site.xml ------------------------------------------- <configuration> <property> <name>mapreduce.framework.name</name> <value>yarn</value> </property> <property> <name>mapreduce.jobhistory.address</name> <value>singleNode:10020</value> </property> <property> <name>mapreduce.jobhistory.webapp.address</name> <value>singleNode:19888</value> </property> </configuration> -------------------------------------------
vi yarn-site.xml ------------------------------------------- <configuration> <property> <name>yarn.nodemanager.aux-services</name> <value>mapreduce_shuffle</value> </property> <property> <name>yarn.resourcemanager.hostname</name> <value>singleNode</value> </property> <property> <name>yarn.log-aggregation-enable</name> <value>true</value> </property> <property> <name>yarn.log-aggregation.retain-seconds</name> <value>604800</value> </property> </configuration> -------------------------------------------
# hadoop-env.sh / mapred-env.sh / yarn-env.sh all need JAVA_HOME.
# FIX: the original wrote /opt/java8, but the JDK symlink created earlier in
# this guide is /opt/install/java — use that path consistently.
vi hadoop-env.sh
-------------------------------------------
export JAVA_HOME=/opt/install/java
-------------------------------------------
vi mapred-env.sh
-------------------------------------------
export JAVA_HOME=/opt/install/java
-------------------------------------------
vi yarn-env.sh
-------------------------------------------
export JAVA_HOME=/opt/install/java
-------------------------------------------
# slaves: list of worker hostnames (only this node in a single-node setup).
vi slaves
-------------------------------------------
singleNode
-------------------------------------------
# Add Hadoop to the environment, format the NameNode (first run only — a
# re-format wipes HDFS metadata), then start all HDFS/YARN daemons.
export HADOOP_HOME=/opt/install/hadoop
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$HADOOP_HOME/bin:$PATH
hdfs namenode -format
$HADOOP_HOME/sbin/start-all.sh
查看50070端口 (HDFS NameNode Web UI)
查看8088端口 (YARN ResourceManager Web UI)
# Unpack Hive and create a version-independent symlink.
# FIX: extract under /opt/install/ so the symlink target exists (the original
# extracted to /opt/), and enter the conf directory via that symlink (the
# original's /opt/hive110 is never created in this guide).
tar zxvf /software/hive-1.1.0-cdh5.14.2.tar.gz -C /opt/install/
ln -s /opt/install/hive-1.1.0-cdh5.14.2 /opt/install/hive
# Enter the configuration directory
cd /opt/install/hive/conf/
vi hive-site.xml ------------------------------------------- <configuration> <property> <name>hive.metastore.warehouse.dir</name> <value>/home/hadoop/hive/warehouse</value> </property> <property> <name>javax.jdo.option.ConnectionURL</name> <value>jdbc:mysql://singleNode:3306/hive?createDatabaseIfNotExist=true</value> </property> <property> <name>javax.jdo.option.ConnectionDriverName</name> <value>com.mysql.jdbc.Driver</value> </property> <property> <name>javax.jdo.option.ConnectionUserName</name> <value>root</value> </property> <property> <name>javax.jdo.option.ConnectionPassword</name> <value>root</value> </property> <property> <name>hive.exec.scratchdir</name> <value>/home/hadoop/hive/data/hive-${user.name}</value> <description>Scratch space for Hive jobs</description> </property> <property> <name>hive.exec.local.scratchdir</name> <value>/home/hadoop/hive/data/${user.name}</value> <description>Local scratch space for Hive jobs</description> </property> </configuration> -------------------------------------------
# hive-env.sh must point at the Hadoop installation.
# FIX: use the /opt/install/hadoop symlink (the original said /opt/hadoop260,
# a directory never created in this guide); likewise /opt/install/hive
# replaces /opt/hive110 below.
vi hive-env.sh
-------------------------------------------
HADOOP_HOME=/opt/install/hadoop
-------------------------------------------
# The metastore needs the MySQL JDBC driver on Hive's classpath.
cp /software/mysql-connector-java-5.1.31.jar /opt/install/hive/lib/
export HIVE_HOME=/opt/install/hive
export PATH=$HIVE_HOME/bin:$PATH
# Start the metastore and HiveServer2 in the background.
nohup hive --service metastore &
nohup hive --service hiveserver2 &
# Unpack Sqoop and create a version-independent symlink.
# FIX: extract under /opt/install/ (the original's "-C /opt/sqoop" would not
# match the symlink below), and use the /opt/install/* paths consistently for
# Hadoop and Hive (the original's /opt/hadoop260 and /opt/hive110 are never
# created in this guide).
tar zxvf /software/sqoop-1.4.6-cdh5.14.2.tar.gz -C /opt/install/
ln -s /opt/install/sqoop-1.4.6-cdh5.14.2 /opt/install/sqoop
cd /opt/install/sqoop/conf/
vi sqoop-env.sh
-------------------------------------------
#Set path to where bin/hadoop is available
export HADOOP_COMMON_HOME=/opt/install/hadoop
#Set path to where hadoop-*-core.jar is available
export HADOOP_MAPRED_HOME=/opt/install/hadoop
#Set the path to where bin/hive is available
export HIVE_HOME=/opt/install/hive
-------------------------------------------
# Sqoop needs the MySQL JDBC driver and the java-json helper jar on its classpath.
cp /software/mysql-connector-java-5.1.31.jar /opt/install/sqoop/lib/
cp /software/java-json.jar /opt/install/sqoop/lib/
export SQOOP_HOME=/opt/install/sqoop
export PATH=$SQOOP_HOME/bin:$PATH
# Verify the installation.
sqoop version
1.如果要是在docker搭建集群怎么搭建,创建不同的docker容器?
网络ip地址分配问题
# Option 1: user-defined Docker network — containers attached to the same
# network can resolve each other by container name.
# Create a network
docker network create hadoopcluster
# List networks
docker network ls
# Specify the network when creating each container
docker run -it --network hadoopcluster --name hadoop1 -h hadoop1 # add the remaining options here (ports etc.) plus the image name
docker run -it --network hadoopcluster --name hadoop2 -h hadoop2
docker run -it --network hadoopcluster --name hadoop3 -h hadoop3
#方案二:
使用另一个工具:docker-compose
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。