赞
踩
在右侧编辑器begin-end
处编写代码补全batchOp(String tablename)
函数,参数:tablename
为待操作表的表名,要求实现如下操作:
删除表中行键为row1
,row2
的行;
获取表中行键为row3
,row10
的行;
四个操作需要依照以上先后顺序,即先删除,再获取row3、row10
。
不需要你直接输出,只需要将批量操作的执行结果作为返回值返回即可。
-
- package step1;
-
- import java.util.ArrayList;
- import java.util.List;
-
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hbase.Cell;
- import org.apache.hadoop.hbase.CellUtil;
- import org.apache.hadoop.hbase.HBaseConfiguration;
- import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.*;
- import org.apache.hadoop.hbase.client.coprocessor.Batch;
- import org.apache.hadoop.hbase.util.Bytes;
-
- public class Task {
-
- public Object[] batchOp(String tablename) throws Exception {
- /********* Begin *********/
- Configuration conf = HBaseConfiguration.create();
- Connection conn = ConnectionFactory.createConnection(conf);
- Table table = conn.getTable(TableName.valueOf(tablename));
- List<Row> rows = new ArrayList<>();
- //删除操作
- Delete delete = new Delete(Bytes.toBytes("row1"));
- Delete delete2 = new Delete(Bytes.toBytes("row2"));
- rows.add(delete);
- rows.add(delete2);
- //获取操作
- Get get = new Get(Bytes.toBytes("row3"));
- Get get2 = new Get(Bytes.toBytes("row10"));
- rows.add(get);
- rows.add(get2);
- //定义结果数组长度
- Object[] results = new Object[rows.size()];
- table.batch(rows, results);//这是一个同步的操作,批量操作的结果将会在操作之后放在results中
- //delete和put操作的结果为NONE 即没有结果
- return results;
-
- /********* End *********/
- }
- }
1.扫描一张表的步骤:
step1:创建Scan扫描对象;
Scan scan = new Scan();
step2:获取Resultscanner对象
ResultScanner scanner = table.getScanner(scan);
step3:利用ResultScanner对象遍历数据
- for(Result result : scanner){
- for (Cell kv : result.rawCells()) {
- String family = Bytes.toString(CellUtil.cloneFamily(kv));
- String qualifire = Bytes.toString(CellUtil.cloneQualifier(kv));
- String value = Bytes.toString(CellUtil.cloneValue(kv));
- values.add(value);
- System.out.println(family + ":" + qualifire + "\t" + value);
- }
- }
编写代码补全右侧scanTable(String tableName)
函数,输出表名为tableName
所有行中所有列的值。
- package step2;
-
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hbase.Cell;
- import org.apache.hadoop.hbase.CellUtil;
- import org.apache.hadoop.hbase.HBaseConfiguration;
- import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.*;
-
- public class Task {
-
- public void scanTable(String tablename) throws Exception {
- /********* Begin *********/
- Configuration conf=HBaseConfiguration.create();
- Connection conn=ConnectionFactory.createConnection(conf);
- Table table = conn.getTable(TableName.valueOf(tablename));
- Scan scan=new Scan();
- ResultScanner scanner=table.getScanner(scan);
- for(Result result : scanner){
- for (Cell cell : result.rawCells()) {
- System.out.println(new String(CellUtil.cloneValue(cell),"utf-8"));
- }
- }
- scanner.close();
-
- /********* End *********/
- }
- }
请补全scanTable(String tablename)
函数实现扫描表的功能,参数tablename
为表名,完成如下操作:
设置扫描缓存为200
;
设置扫描的行键范围在1
到row199
之间;
将扫描到数据的值输出。
- package step3;
-
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hbase.Cell;
- import org.apache.hadoop.hbase.CellUtil;
- import org.apache.hadoop.hbase.HBaseConfiguration;
- import org.apache.hadoop.hbase.TableName;
- import org.apache.hadoop.hbase.client.*;
- import org.apache.hadoop.hbase.util.Bytes;
-
- public class Task {
-
- public void scanTable(String tablename) throws Exception {
- /********* Begin *********/
- Configuration conf=HBaseConfiguration.create();
- Connection conn=ConnectionFactory.createConnection(conf);
- Table table = conn.getTable(TableName.valueOf(tablename));
- Scan scan=new Scan();
- scan.setCaching(200); //设置缓存
- scan.setStartRow(Bytes.toBytes("1"));
- scan.setStopRow(Bytes.toBytes("row199"));
- ResultScanner scanner=table.getScanner(scan);
- for(Result result:scanner){
- for(Cell cell:result.listCells()){
- System.out.println(new String(CellUtil.cloneValue(cell),"utf-8"));
- }
- }
- scanner.close();
- /********* End *********/
- }
- }
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。