Prerequisite: start ZooKeeper and Hadoop (HDFS) on all nodes before installing HBase.
# Download and unpack HBase 2.5.3 (Hadoop 3 build) into /opt/soft/hbase2.
mkdir -p /opt/soft
# Guard the cd: if it fails, do not extract/rename in the wrong directory.
cd /opt/soft || exit 1
wget https://archive.apache.org/dist/hbase/2.5.3/hbase-2.5.3-hadoop3-bin.tar.gz
tar -zxvf hbase-2.5.3-hadoop3-bin.tar.gz
# Rename to a short, version-free path used by the rest of this guide.
mv hbase-2.5.3-hadoop3-bin hbase2
# Register HBase in the system-wide environment: append these two lines
# to /etc/profile.
vim /etc/profile
export HBASE_HOME=/opt/soft/hbase2
# Quote the expansion so an existing PATH containing spaces is preserved intact.
export PATH="$PATH:$HBASE_HOME/bin"
# Reload so the current shell picks up HBASE_HOME and the extended PATH.
source /etc/profile
# Configure the HBase runtime environment in conf/hbase-env.sh.
cd /opt/soft/hbase2/conf
vim hbase-env.sh
# JDK used by the HBase daemons; must point at the cluster's Java installation.
export JAVA_HOME=/opt/soft/jdk8
export JAVA_CLASSPATH=.:/opt/soft/jdk8/lib/dt.jar:/opt/soft/jdk8/lib/tools.jar
# Put Hadoop's conf dir on HBase's classpath so it can read core-site.xml /
# hdfs-site.xml (needed to resolve the HDFS nameservice used by hbase.rootdir).
export HBASE_CLASSPATH=/opt/soft/hadoop3/etc/hadoop
# Use the external ZooKeeper ensemble (spark01-03) instead of the one
# bundled with HBase.
export HBASE_MANAGES_ZK=false
# JVM heap size for the HBase daemons.
export HBASE_HEAPSIZE=1G
# Do not build the classpath from 'hadoop classpath' — avoids pulling in
# conflicting Hadoop jars.
export HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP=true
# Write conf/hbase-site.xml (here-doc is scriptable; the quoted delimiter
# prevents any shell expansion inside the XML).
cat > hbase-site.xml <<'EOF'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <!-- The authority in hbase.rootdir ("lihaozhe") must match the
       dfs.nameservices value configured in Hadoop's hdfs-site.xml. -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://lihaozhe/hbase</value>
  </property>
  <!-- NOTE(review): hbase.master normally expects host:port, not a bare
       port, and the property is obsolete in HBase 2.x — verify whether
       this entry is needed at all. Value kept as in the original. -->
  <property>
    <name>hbase.master</name>
    <value>8020</value>
  </property>
  <!-- External ZooKeeper ensemble. -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>spark01:2181,spark02:2181,spark03:2181</value>
  </property>
  <!-- Run fully distributed (separate daemons) rather than standalone. -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- Skip the hflush/hsync capability check on the WAL filesystem. -->
  <property>
    <name>hbase.unsafe.stream.capability.enforce</name>
    <value>false</value>
  </property>
</configuration>
EOF
# List every host that should run a RegionServer, one per line.
# A here-doc replaces the interactive vim edit so this step is scriptable.
cat > regionservers <<'EOF'
spark01
spark02
spark03
EOF
Optional: a backup master need not be configured.
# Optional: hosts listed here run a standby HMaster for failover.
cat > backup-masters <<'EOF'
spark02
EOF
# Copy the configured HBase installation to the other cluster nodes.
for node in spark02 spark03; do
  scp -r /opt/soft/hbase2 "root@${node}:/opt/soft/"
done
# Distribute the environment file as well.
for node in spark02 spark03; do
  scp /etc/profile "root@${node}:/etc"
done
# Refresh the environment variables on each of the other nodes.
source /etc/profile
# Start the whole cluster: HMaster plus one RegionServer per host listed
# in conf/regionservers (run from the primary master, spark01).
start-hbase.sh
# Verify the Java daemons: expect HMaster on spark01, a backup HMaster on
# spark02, and HRegionServer on every node.
jps
# Web UIs (HBase 2.x master info port defaults to 16010) — open in a browser,
# these are not shell commands:
#   active master:  http://spark01:16010/
#   backup master:  http://spark02:16010/
Copyright © 2003-2013 www.wpsshop.cn. All rights reserved.