一、前置条件
- HBase服务:【快捷部署】023_HBase(2.3.6)
- 开发环境:Java(1.8)、Maven(3)、IDE(Idea 或 Eclipse)
二、相关代码
代码结构如上图中①和②
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- Demo project packaging the HBase API examples into a fat jar (HbaseAPI-jar-with-dependencies.jar). -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.huawei</groupId>
    <artifactId>HbaseAPI</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
    </properties>

    <repositories>
        <repository>
            <id>huaweicloud2</id>
            <name>huaweicloud2</name>
            <url>https://mirrors.huaweicloud.com/repository/maven/</url>
        </repository>
        <repository>
            <id>huaweicloud1</id>
            <name>huaweicloud1</name>
            <url>https://repo.huaweicloud.com/repository/maven/huaweicloudsdk/</url>
        </repository>
    </repositories>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.8.3</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.8.3</version>
        </dependency>
        <!-- hbase -->
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.4.13</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.4.13</version>
        </dependency>
    </dependencies>

    <build>
        <finalName>HbaseAPI</finalName>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <!-- the old "assembly" goal was removed from the plugin; "single" is the supported goal -->
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
Config4HBaseDemo(公共配置类)
packagecom.toc.demo.hbase;/**
* 公共配置类
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassConfig4HBaseDemo{publicstaticString zkQuorum ="127.0.0.1";publicstaticStringgetZkQuorum(String[] args){if(args!=null&& args.length >0){System.out.println("接收参数:"+ args[0]);
zkQuorum = args[0];}return zkQuorum;}}
CreateTable(创建Hbase表)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.HColumnDescriptor;importorg.apache.hadoop.hbase.HTableDescriptor;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.Admin;importorg.apache.hadoop.hbase.client.Connection;importorg.apache.hadoop.hbase.client.ConnectionFactory;importjava.io.IOException;/**
* 创建Hbase表
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassCreateTable{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Admin admin = connection.getAdmin();TableName tableName =TableName.valueOf("users");if(!admin.tableExists(tableName)){//创建表描述器HTableDescriptor htd =newHTableDescriptor(tableName);
htd.addFamily(newHColumnDescriptor("f"));
admin.createTable(htd);System.out.println(tableName+"表创建成功");}else{System.out.println(tableName+"表已经存在");}}}
DeleteData(删除数据)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.*;importorg.apache.hadoop.hbase.util.Bytes;importjava.io.IOException;/**
* 删除数据
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassDeleteData{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Table hTable = connection.getTable(TableName.valueOf("users"));Delete delete =newDelete(Bytes.toBytes("row5"));
delete.addColumn(Bytes.toBytes("f"),Bytes.toBytes("id"));//直接删除family,将所有row5的信息全部删除
delete.addFamily(Bytes.toBytes("f"));
hTable.delete(delete);System.out.println("删除成功");}}
DeleteTable(删除表)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.Admin;importorg.apache.hadoop.hbase.client.Connection;importorg.apache.hadoop.hbase.client.ConnectionFactory;importjava.io.IOException;/**
* 删除表
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassDeleteTable{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Admin hBaseAdmin = connection.getAdmin();TableName tableName =TableName.valueOf("users");if(hBaseAdmin.tableExists(tableName)){//判断表的状态if(hBaseAdmin.isTableAvailable(tableName)){
hBaseAdmin.disableTable(tableName);}
hBaseAdmin.deleteTable(tableName);System.out.println("删除表"+tableName+"成功");}else{System.out.println(tableName+"表不存在");}}}
DescTable(查看表结构)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.HTableDescriptor;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.*;importjava.io.IOException;/**
* 查看表结构
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassDescTable{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Admin hBaseAdmin = connection.getAdmin();TableName tableName =TableName.valueOf("users");if(hBaseAdmin.tableExists(tableName)){HTableDescriptor htd = hBaseAdmin.getTableDescriptor(tableName);System.out.println("查看"+tableName+"表结构");System.out.println(htd);}else{System.out.println(tableName+"表不存在");}}}
GetData(获取数据)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.*;importorg.apache.hadoop.hbase.util.Bytes;importjava.io.IOException;/**
* 获取数据
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassGetData{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Table hTable = connection.getTable(TableName.valueOf("users"));Get get =newGet(Bytes.toBytes("row1"));Result result = hTable.get(get);byte[] family =Bytes.toBytes("f");byte[] buf = result.getValue(family,Bytes.toBytes("id"));System.out.println("id="+Bytes.toString(buf));
buf = result.getValue(family,Bytes.toBytes("age"));System.out.println("age="+Bytes.toInt(buf));
buf = result.getValue(family,Bytes.toBytes("name"));System.out.println("name="+Bytes.toString(buf));
buf = result.getRow();System.out.println("rowkey="+Bytes.toString(buf));}}
PutData(插入数据)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.*;importorg.apache.hadoop.hbase.util.Bytes;importjava.io.IOException;importjava.util.ArrayList;importjava.util.List;/**
* 插入数据
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassPutData{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Table hTable = connection.getTable(TableName.valueOf("users"));//插入一条Put put=newPut(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("f"),Bytes.toBytes("id"),Bytes.toBytes("1"));
put.addColumn(Bytes.toBytes("f"),Bytes.toBytes("name"),Bytes.toBytes("张三"));
put.addColumn(Bytes.toBytes("f"),Bytes.toBytes("age"),Bytes.toBytes(27));
put.addColumn(Bytes.toBytes("f"),Bytes.toBytes("phone"),Bytes.toBytes("18600000000"));
put.addColumn(Bytes.toBytes("f"),Bytes.toBytes("emil"),Bytes.toBytes("[email protected]"));
hTable.put(put);//插入多个Put put1=newPut(Bytes.toBytes("row2"));
put1.addColumn(Bytes.toBytes("f"),Bytes.toBytes("id"),Bytes.toBytes("2"));
put1.addColumn(Bytes.toBytes("f"),Bytes.toBytes("name"),Bytes.toBytes("李四"));Put put2=newPut(Bytes.toBytes("row3"));
put2.addColumn(Bytes.toBytes("f"),Bytes.toBytes("id"),Bytes.toBytes("3"));
put2.addColumn(Bytes.toBytes("f"),Bytes.toBytes("name"),Bytes.toBytes("王五"));Put put3=newPut(Bytes.toBytes("row4"));
put3.addColumn(Bytes.toBytes("f"),Bytes.toBytes("id"),Bytes.toBytes("4"));
put3.addColumn(Bytes.toBytes("f"),Bytes.toBytes("name"),Bytes.toBytes("赵六"));List<Put> list =newArrayList<Put>();
list.add(put1);
list.add(put2);
list.add(put3);
hTable.put(list);//检测put,条件成功就插入,要求RowKey是一样的Put put4 =newPut(Bytes.toBytes("row5"));
put4.addColumn(Bytes.toBytes("f"),Bytes.toBytes("id"),Bytes.toBytes("5"));
hTable.checkAndPut(Bytes.toBytes("row5"),Bytes.toBytes("f"),Bytes.toBytes("id"),null,put4);System.out.println("插入成功");}}
ScanData(扫描遍历数据)
packagecom.toc.demo.hbase;importorg.apache.hadoop.conf.Configuration;importorg.apache.hadoop.hbase.HBaseConfiguration;importorg.apache.hadoop.hbase.TableName;importorg.apache.hadoop.hbase.client.*;importorg.apache.hadoop.hbase.filter.FilterList;importorg.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;importorg.apache.hadoop.hbase.util.Bytes;importjava.io.IOException;importjava.util.Iterator;importjava.util.Map;importjava.util.NavigableMap;/**
* 扫描遍历数据
* @author cxy@toc
* @date 2024-05-07
*
*/publicclassScanData{publicstaticvoidmain(String[] args)throwsIOException{//链接hbaseConfiguration conf =HBaseConfiguration.create();//这里的zookeeper地址要改为自己集群的zookeeper地址
conf.set("hbase.zookeeper.quorum",Config4HBaseDemo.getZkQuorum(args));
conf.set("hbase.zookeeper.property.clientPort","2181");Connection connection =ConnectionFactory.createConnection(conf);Table hTable = connection.getTable(TableName.valueOf("users"));Scan scan =newScan();//增加起始rowkey
scan.withStartRow(Bytes.toBytes("row1"));
scan.withStopRow(Bytes.toBytes("row5"));//增加过滤filterFilterList list =newFilterList(FilterList.Operator.MUST_PASS_ALL);byte[][] prefixes =newbyte[2][];
prefixes[0]=Bytes.toBytes("id");
prefixes[1]=Bytes.toBytes("name");MultipleColumnPrefixFilter mcpf =newMultipleColumnPrefixFilter(prefixes);
list.addFilter(mcpf);
scan.setFilter(list);ResultScanner rs = hTable.getScanner(scan);Iterator<Result> iter = rs.iterator();while(iter.hasNext()){Result result = iter.next();printResult(result);}}/*
打印Result对象
*/staticvoidprintResult(Result result){System.out.println("***********"+Bytes.toString(result.getRow()));NavigableMap<byte[],NavigableMap<byte[],NavigableMap<Long,byte[]>>> map = result.getMap();for(Map.Entry<byte[],NavigableMap<byte[],NavigableMap<Long,byte[]>>> entry: map.entrySet()){String family =Bytes.toString(entry.getKey());for(Map.Entry<byte[],NavigableMap<Long,byte[]>> columnEntry :entry.getValue().entrySet()){String column =Bytes.toString(columnEntry.getKey());String value ="";if("age".equals(column)){
value=""+Bytes.toInt(columnEntry.getValue().firstEntry().getValue());}else{
value=""+Bytes.toString(columnEntry.getValue().firstEntry().getValue());}System.out.println(family+":"+column+":"+value);}}}}
三、如何使用
- Maven打包 Eclipse:项目上右键 Run As -> Maven Install进行打包 Idea:Maven工具栏 -> 生命周期 -> install
打包好的jar如上图中的③
- 上传jar到Hadoop(yarn)服务器 scp 你的target/HbaseAPI-jar-with-dependencies.jar root@xxx.xxx.xxx.xxx:/root
- 登录到服务器,并查看上传的文件
ssh [email protected]
ls
- 执行命令,查看效果
# 将{ZK的内网IP}改为zookeeper的ip,如果就是本机可以不写,默认是127.0.0.1
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.CreateTable {ZK的内网IP}
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.DescTable {ZK的内网IP}
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.PutData {ZK的内网IP}
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.GetData {ZK的内网IP}
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.ScanData {ZK的内网IP}
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.DeleteData {ZK的内网IP}
yarn jar HbaseAPI-jar-with-dependencies.jar com.toc.demo.hbase.DeleteTable {ZK的内网IP}
更多详细操作可参见华为云沙箱实验:https://lab.huaweicloud.com/experiment-detail_1779
往期精彩内容推荐
云原生:10分钟了解一下Kubernetes架构
云原生:5分钟了解一下Kubernetes是什么
「快速部署」第二期清单
「快速部署」第一期清单
本文转载自: https://blog.csdn.net/luckcxy/article/details/138556566
版权归原作者 第十个灵魂 所有, 如有侵权,请联系我们删除。
版权归原作者 第十个灵魂 所有, 如有侵权,请联系我们删除。