赞
踩
官网: https://hbase.apache.org/downloads.html
不同版本集合:https://archive.apache.org/dist/hbase/
修改hbase-env.cmd
set JAVA_HOME=D:\Development\Java\jdk1.8
# HBase内置zookeeper,使用内置zk,需要设置为true:由HBase自己管理zookeeper
set HBASE_MANAGES_ZK=true
set HADOOP_HOME=D:\Development\Hadoop
set HBASE_LOG_DIR=D:\Development\HBase\logs
修改hbase-site.xml
<configuration>
  <!-- Root directory on HDFS where HBase stores its data -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://localhost:9000/hbase</value>
  </property>
  <!-- Distributed deployment flag; false = standalone mode -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>false</value>
  </property>
  <!-- ZooKeeper quorum URL; separate multiple hosts with commas -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>localhost:2181</value>
  </property>
  <!-- Root znode for HBase data in ZooKeeper -->
  <property>
    <name>zookeeper.znode.parent</name>
    <value>/hbase</value>
  </property>
  <!-- ZooKeeper data directory -->
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>D:\Development\HBase\data\tmp\zoo</value>
  </property>
  <!-- tmp directory on the local filesystem -->
  <property>
    <name>hbase.tmp.dir</name>
    <value>D:\Development\HBase\data\tmp</value>
  </property>
  <!-- Set to false when using the local filesystem, true when using HDFS.
       NOTE: the original snippet declared this property twice; the duplicate
       has been removed - a property must appear only once per file. -->
  <property>
    <name>hbase.unsafe.stream.capability.enforce</name>
    <value>false</value>
  </property>
  <!-- Port for the HBase Master web UI -->
  <property>
    <name>hbase.master.info.port</name>
    <value>16010</value>
  </property>
</configuration>
注意:先启动Hadoop,再启动HBase
Windows安装Hadoop3.x及在Windows环境下本地开发
在HBase/bin目录操作,启动HBase
D:\Development\HBase\bin>start-hbase.cmd
D:\Development\HBase\bin>hbase shell
hbase(main):001:0> list
TABLE
0 row(s)
Took 1.7100 seconds
=> []
hbase(main):002:0>
访问:http://localhost:16010
查看Hbase情况
添加依赖
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>3.1.3</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>3.1.3</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>2.2.5</version>
</dependency>
<!--java.lang.NoSuchMethodError: 'void org.apache.hadoop.security.HadoopKerberosName.setRuleMechanism(java.lang.String)'-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<version>3.1.3</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>2.2.5</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
</dependency>
</dependencies>
添加Hadoop和HBase的配置文件到项目Resources目录
core-site.xml
hbase-site.xml
hdfs-site.xml
mapred-site.xml
yarn-site.xml
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class HBaseTest {
    /**
     * HBase administrative interface, obtained from the connection in {@link #init()}.
     */
    private Admin admin;
    /**
     * Connection to the HBase cluster; configuration is loaded from
     * hbase-site.xml on the classpath by HBaseConfiguration.create().
     */
    private Connection connection;
    /**
     * Set up: create the cluster connection and the Admin client before each test.
     */
    @Before
    public void init() throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        this.connection = ConnectionFactory.createConnection(configuration);
        this.admin = connection.getAdmin();
    }
    /**
     * Tear down: release the Admin client and the connection after each test.
     * (Method name "destory" is a typo for "destroy", kept for compatibility.)
     */
    @After
    public void destory() throws IOException {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    /**
     * List the names of all tables visible to this user and print each one.
     */
    @Test
    public void listTables() throws IOException {
        TableName[] tableNames = admin.listTableNames();
        for (TableName tableName : tableNames) {
            System.out.println("tableName:" + tableName);
        }
    }
}
异常1:
util.FSUtils: Waiting for dfs to exit safe mode...
退出Hadoop安全模式
hdfs dfsadmin -safemode leave(Hadoop 3.x 中 `hadoop dfsadmin` 已废弃,改用 `hdfs dfsadmin`)
异常2:
Caused by: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: no further information: Coding/192.168.138.245:16000
Caused by: java.net.ConnectException: Connection refused: no further information
最坑爹的一个问题,折腾太久太久,Hadoop与HBase版本不匹配,更换版本。本次搭建使用Hadoop3.1.3与HBase2.2.5
注意:官网给出的版本兼容表不完全可靠,仍然有Bug
节点 | HMasterActive | HMasterStandBy | HRegionServer | Zookeeper |
---|---|---|---|---|
node01 | * | | * | * |
node02 | | * | * | * |
node03 | | | * | * |
注意:Hadoop集群、ZooKeeper集群、Hive集群正常运行,时间同步
官网: https://hbase.apache.org/downloads.html
不同版本集合:https://archive.apache.org/dist/hbase/
wget https://archive.apache.org/dist/hbase/2.4.5/hbase-2.4.5-bin.tar.gz
解压、安装
tar -zxvf hbase-2.4.5-bin.tar.gz
mv hbase-2.4.5 hbase
cd conf
vim hbase-env.sh
export HBASE_LOG_DIR=${HBASE_HOME}/logs
export JAVA_HOME=/usr/local/jdk8
# 告诉HBase它是否应该管理自己的ZooKeeper实例
export HBASE_MANAGES_ZK=false
export HADOOP_HOME=/usr/local/program/hadoop
vim hbase-site.xml
<configuration>
  <!-- Root directory on HDFS (HA nameservice) where HBase table data is stored -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://my-hdfs/hbase</value>
  </property>
  <!-- Distributed deployment flag; true = fully distributed cluster -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- ZooKeeper quorum URL; separate multiple hosts with commas -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>node01:2181,node02:2181,node03:2181</value>
  </property>
  <!-- Root znode for HBase data in ZooKeeper -->
  <property>
    <name>zookeeper.znode.parent</name>
    <value>/hbase</value>
  </property>
  <!-- tmp directory on the local filesystem; recommended to set explicitly -->
  <property>
    <name>hbase.tmp.dir</name>
    <value>/usr/local/program/hbase/tmp</value>
  </property>
  <!-- Set to false when using the local filesystem, true when using HDFS.
       NOTE: the original snippet declared this property twice; the duplicate
       has been removed - a property must appear only once per file. -->
  <property>
    <name>hbase.unsafe.stream.capability.enforce</name>
    <value>false</value>
  </property>
</configuration>
vim regionservers
node01
node02
node03
vim backup-masters
node02
vim /etc/profile
export HBASE_HOME=/usr/local/program/hbase
export PATH=$HBASE_HOME/bin:$PATH
使生效 source /etc/profile
cp /usr/local/program/hadoop/etc/hadoop/core-site.xml /usr/local/program/hbase/conf/
cp /usr/local/program/hadoop/etc/hadoop/hdfs-site.xml /usr/local/program/hbase/conf/
[root@node02 ~]# scp -r root@node01:/usr/local/program/hbase /usr/local/program
[root@node03 ~]# scp -r root@node01:/usr/local/program/hbase /usr/local/program
[root@node01 conf]# scp /etc/profile root@node02:/etc/profile
[root@node01 conf]# scp /etc/profile root@node03:/etc/profile
使配置生效:source /etc/profile
启动zookeeper
zkServer.sh start
start-all.sh(start-all.sh 已包含 HDFS 的启动;若只需 HDFS,可单独执行 start-dfs.sh)
启动HBase
start-hbase.sh
各节点进程情况:
--------- node01 ----------
11731 Jps
11348 HRegionServer
10089 NameNode
10314 QuorumPeerMain
10282 DataNode
11099 HMaster
--------- node02 ----------
4514 DataNode
2067 QuorumPeerMain
4614 SecondaryNameNode
4906 HMaster
4700 HRegionServer
5230 Jps
--------- node03 ----------
3492 HRegionServer
1947 QuorumPeerMain
3389 DataNode
3743 Jps
访问:http://node01:16010
查看Hbase集群情况
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。