package com.spark.util.example

import com.spark.util.client.HBaseClient
import com.spark.util.core.Sparking
import com.spark.util.utils.HBaseUtil
import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.client.{Put, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.mutable.ListBuffer

object HBaseExample extends Sparking {

  /**
   * End-to-end HBase usage demo:
   *  1. create a pre-split ("salted") table and bulk-write to it via the MR output format,
   *  2. read it back with one Scan per salt prefix and union the results,
   *  3. count rows through a coprocessor,
   *  4. bulk-load a second table via HFiles.
   */
  def main(args: Array[String]): Unit = {

    enableLogEliminating()
    val spark = getSparkSession(None)

    // --- Create a pre-split partition table ----------------------------------
    val pTable = "test"
    val pFamily = "test"
    val pColumn = "column"
    val partitions = 3
    HBaseClient.deleteTable(pTable)
    HBaseClient.createPartitionTable(pTable, Array(pFamily), partitions)

    // --- Write: salt each row key so writes spread across the regions --------
    val pRDD = spark.sparkContext.makeRDD(List.range(0, 10000))
      .map { x =>
        // Zero-pad so lexicographic row-key order matches numeric order.
        val rowKey = HBaseUtil.getPartitionRowKey(f"$x%05d", partitions)
        val put = new Put(Bytes.toBytes(rowKey))
        put.addColumn(Bytes.toBytes(pFamily), Bytes.toBytes(pColumn), Bytes.toBytes(x.toString))
        (new ImmutableBytesWritable, put)
      }
    HBaseClient.saveAsMR(pRDD, pTable)

    // --- Read: one Scan per salt prefix, then union the per-partition RDDs ---
    // The original example scanned prefixes "0|".."3|" for partitions = 3,
    // i.e. partitions + 1 prefixes (presumably `partitions` split points yield
    // partitions + 1 regions — TODO confirm against HBaseUtil.getPartitionRowKey).
    // Stop row is exclusive, so each scan returns rows 00020..00024.
    val scans = (0 to partitions).map { prefix =>
      val scan = new Scan()
      scan.setStartRow(Bytes.toBytes(s"$prefix|00020"))
      scan.setStopRow(Bytes.toBytes(s"$prefix|00025"))
      scan
    }
    scans
      .map(scan => HBaseClient.readTable(spark.sparkContext, pTable, Some(scan)))
      .reduce(_ union _)
      .map(x => (HBaseUtil.getRowKey(x), HBaseUtil.getCell(x, pFamily, pColumn)))
      // NOTE(review): println runs on the executors; output is only visible on
      // the driver console when running in local mode.
      .foreach(println(_))

    // --- Count rows via a server-side coprocessor ----------------------------
    HBaseClient.addCoprocessor(pTable)
    println(HBaseClient.rowCount(pTable, new Scan()))

    // --- Bulk load via HFile --------------------------------------------------
    HBaseClient.createTable("test1", Array("family"))
    // HFile bulk load requires KeyValues sorted by qualifier within each row,
    // hence the pre-sorted (qualifier, value-index) pairs.
    val sortedColumn = List(("id", 0), ("name", 1)).sorted
    val hFile = spark.sparkContext.makeRDD(List.range(0, 10000)).map(x => Array(x.toString, "dff"))
      .map { x =>
        val buffer = new ListBuffer[KeyValue]()
        val rowKey = Bytes.toBytes(x(0))
        val family = Bytes.toBytes("family")
        sortedColumn.foreach { case (columnName, valueIndex) =>
          val kv = new KeyValue(rowKey, family, Bytes.toBytes(columnName), Bytes.toBytes(x(valueIndex)))
          buffer.append(kv)
        }
        (new ImmutableBytesWritable(rowKey), buffer)
      }
    HBaseClient.saveAsHFile(hFile, "hdfs://hikbigdata/test/dff", "test1")

    // Shut the SparkSession down cleanly (the original leaked it).
    spark.stop()
  }
}
