import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

object BulkLoadDemo {

  /**
   * Bulk-loads a small demo dataset into HBase table "t1" by writing HFiles
   * with Spark and then handing them to LoadIncrementalHFiles.
   *
   * Pipeline: build (rowkey, KeyValue) pairs -> sort them in HFile order
   * (rowkey, then column family, then qualifier) -> save as HFiles under a
   * staging directory -> bulk-load the staged files into the table.
   *
   * NOTE(review): cluster endpoints (HDFS namenode, ZooKeeper quorum) are
   * hard-coded below — presumably a demo/dev environment; confirm before reuse.
   */
  def main(args: Array[String]): Unit = {
    // Staging directory for the generated HFiles. Used both when saving and
    // when bulk-loading, so it is defined once to keep the two in sync.
    val hfilePath = "/hfile_tmp/t1/"

    val spark = SparkSession.builder()
      .appName("dws流量会话聚合表导入HBASE")
      .config("spark.serializer", classOf[KryoSerializer].getName)
      .master("local[1]")
      .getOrCreate()
    import spark.implicits._

    // Demo data: (rowkey, name, age).
    val ds = spark.createDataset(Seq(("1", "zss", 18), ("2", "bbb", 28), ("8", "ccc", 16), ("20", "ddd", 26), ("41", "eee", 86)))

    // Expand each row into one KeyValue per column (family "f1", qualifiers
    // "name" and "age"), each keyed by the row key for HFile output.
    val rdd = ds.rdd.flatMap { case (id, name, age) =>
      val rowkeyBytes = Bytes.toBytes(id)
      val rowkey = new ImmutableBytesWritable(rowkeyBytes)
      Seq(
        (rowkey, new KeyValue(rowkeyBytes, Bytes.toBytes("f1"), Bytes.toBytes("name"), Bytes.toBytes(name))),
        (rowkey, new KeyValue(rowkeyBytes, Bytes.toBytes("f1"), Bytes.toBytes("age"), Bytes.toBytes(age)))
      )
    }

    // HFileOutputFormat2 requires its input sorted by rowkey, then column
    // family, then qualifier. Family/qualifier comparisons are only evaluated
    // when the earlier key components tie (the original computed all three
    // comparisons unconditionally).
    implicit object HbaseOrdering extends Ordering[(ImmutableBytesWritable, KeyValue)] {
      override def compare(x: (ImmutableBytesWritable, KeyValue), y: (ImmutableBytesWritable, KeyValue)): Int = {
        val rkc: Int = x._1.compareTo(y._1)
        if (rkc != 0) rkc
        else {
          val f: Int = Bytes.compareTo(x._2.getFamilyArray, x._2.getFamilyOffset, x._2.getFamilyLength, y._2.getFamilyArray, y._2.getFamilyOffset, y._2.getFamilyLength)
          if (f != 0) f
          else Bytes.compareTo(x._2.getQualifierArray, x._2.getQualifierOffset, x._2.getQualifierLength, y._2.getQualifierArray, y._2.getQualifierOffset, y._2.getQualifierLength)
        }
      }
    }

    // Total sort using the implicit HbaseOrdering above.
    val res = rdd.sortBy(identity)
    res.foreach(println)

    val conf = HBaseConfiguration.create()
    conf.set("fs.defaultFS", "hdfs://doitedu01:8020")
    conf.set("hbase.zookeeper.quorum", "doitedu01,doitedu02,doitedu03")
    val job = Job.getInstance(conf)

    // HBase connection and table are closed in finally blocks — the original
    // leaked both.
    val conn = ConnectionFactory.createConnection(conf)
    try {
      val tableName = TableName.valueOf("t1")
      val table = conn.getTable(tableName)
      try {
        val locator = conn.getRegionLocator(tableName)

        // Configure the job for incremental load (output format, partitioner,
        // per-region settings derived from the table), then write the sorted
        // KeyValues out as HFiles.
        HFileOutputFormat2.configureIncrementalLoad(job, table, locator)
        res.saveAsNewAPIHadoopFile(hfilePath, classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], job.getConfiguration)

        // Move the staged HFiles into the table's regions.
        new LoadIncrementalHFiles(job.getConfiguration).doBulkLoad(new Path(hfilePath), conn.getAdmin, table, locator)
      } finally {
        table.close()
      }
    } finally {
      conn.close()
    }

    spark.close()
  }

}
