package com.fwmagic.spark.streaming.util

import java.util.ArrayList
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Put, Table}
import org.apache.hadoop.hbase.util.Bytes

/**
  * Hbase工具类，用来创建Hbase的Connection
  */
/**
  * HBase utility for creating an HBase [[Connection]].
  */
object HbaseUtil {

    /**
      * Builds an HBase connection from a ZooKeeper quorum.
      *
      * @param zkQuorum ZooKeeper address(es); multiple addresses separated by commas
      * @return a live HBase Connection — the caller is responsible for closing it
      */
    def getConnection(zkQuorum: String): Connection = {
        val conf = HBaseConfiguration.create()
        conf.set("hbase.zookeeper.quorum", zkQuorum)
        ConnectionFactory.createConnection(conf)
    }

    /**
      * Manual smoke test: writes rows "oid2".."oid25" into table "orderb",
      * flushing to HBase in batches of 10.
      *
      * @param args unused
      */
    def main(args: Array[String]): Unit = {
        val address = "192.168.62.131:2181,192.168.62.132:2181,192.168.62.133:2181"
        // Acquire connection and table handle.
        val connection: Connection = HbaseUtil.getConnection(address)
        val table: Table = connection.getTable(TableName.valueOf("orderb"))
        try {
            // Batch write.
            val oid = "oid"
            val batchSize = 10
            val puts = new ArrayList[Put](batchSize)
            for (i <- 2 to 25) {
                val put = new Put(Bytes.toBytes(oid + i))
                put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("order_id"), Bytes.toBytes(oid + i))
                put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("total_money"), Bytes.toBytes(1000 + i))

                put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("topic"), Bytes.toBytes("wc"))
                put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("partition"), Bytes.toBytes(i))
                put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("offset"), Bytes.toBytes(i))

                puts.add(put)

                // BUG FIX: flush every `batchSize` puts. The original checked the
                // size once AFTER the loop, so per-batch flushing never happened
                // (and a total that was a multiple of 10 would have triggered an
                // extra empty put call).
                if (puts.size() == batchSize) {
                    table.put(puts)
                    puts.clear()
                }
            }
            // Flush the final partial batch, if any.
            if (!puts.isEmpty) {
                table.put(puts)
            }
        } finally {
            // Always release HBase resources, even if a write fails.
            table.close()
            connection.close()
        }
    }

}
