package cn.doitedu.datatransfer

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Table}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

/**
 * Bulk-loads the Hive traffic-session aggregate table into HBase.
 *
 * Bulk-load principle:
 * Outside HBase's normal write path, an external job (e.g. MapReduce or Spark)
 * arranges the data into HBase's on-disk file format (HFile). A client then asks
 * the HBase Master to update the table metadata and move the pre-generated HFiles
 * into the table's directory under the HBase storage root.
 *
 * The target table must be created in advance:
 *   create 'session_agr','f'
 */
object ImportSessionAgr2Hbase {

  /** Columns copied from the Hive table into the 'f' family (qualifier == column name). */
  private val Columns = Seq(
    "guid", "session_id", "start_ts", "end_ts", "first_page_id", "last_page_id",
    "pv_cnt", "isnew", "hour_itv", "country", "province", "city", "region", "device_type")

  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession.builder()
      .appName("hive表 流量会话聚合表 导入  HBASE")
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    // Load one dt partition of the Hive session-aggregate table.
    val agrTable = spark.read.table("dws17.app_trf_agr_session").where("dt='2020-10-07'")

    // Flatten each row into (rowkey, family, qualifier, value) cells.
    // Rowkey layout: start_ts|session_id.
    val agrTableFlat: RDD[(String, String, String, String)] = agrTable.rdd
      .flatMap(row => {
        // Render any column (numeric or string) as a string; a SQL NULL becomes the
        // literal "null", which the filter below relies on to drop bad rowkeys.
        // NOTE(fix): the previous getAs[Long]/getAs[Int] calls silently unboxed NULL
        // to 0, so a row with a NULL start_ts produced rowkey "0|..." and slipped
        // past the null filter.
        def col(name: String): String = String.valueOf(row.getAs[Any](name))

        val rowKey = col("start_ts") + "|" + col("session_id")
        Columns.map(q => (rowKey, "f", q, col(q)))
      })
      // Drop cells whose rowkey components were NULL.
      .filter(tp => !tp._1.contains("null"))

    // HFileOutputFormat2 requires cells sorted by rowkey, then family, then qualifier.
    val sorted = agrTableFlat.sortBy(tp => (tp._1, tp._2, tp._3))

    // Convert to the (ImmutableBytesWritable, KeyValue) pairs HFileOutputFormat2 expects.
    val kvData = sorted.map(tp => {
      val rowKey = new ImmutableBytesWritable(Bytes.toBytes(tp._1))
      // rowKey, family, qualifier, value
      val kv = new KeyValue(Bytes.toBytes(tp._1), Bytes.toBytes(tp._2), Bytes.toBytes(tp._3), Bytes.toBytes(tp._4))
      (rowKey, kv)
    })

    /*
     * Secondary-index data for the entry-page field:
     * rowkey = first_page_id, value = '\001'-joined session-table rowkeys.
     *
     * TODO(review): this RDD is built but never written anywhere. RDDs are lazy, so
     * none of this pipeline actually executes. To make it effective it needs its own
     * output path, a saveAsNewAPIHadoopFile call, and a doBulkLoad into a pre-created
     * index table (and agrTableFlat should then be cached, since it would be
     * consumed by two jobs).
     */
    val firstPageIndex = agrTableFlat
      .filter(tp => tp._3.equals("first_page_id"))
      .map(tp => (tp._4, tp._1)) // (entry page, session-table rowkey)
      .groupByKey()
      .mapValues(iter => iter.mkString("\001"))
      .map(tp => (tp._1, "f", "q", tp._2))
      .sortBy(tp => (tp._1, tp._2, tp._3))
      .map(tp => {
        val rowKey = new ImmutableBytesWritable(Bytes.toBytes(tp._1))
        val kv = new KeyValue(Bytes.toBytes(tp._1), Bytes.toBytes(tp._2), Bytes.toBytes(tp._3), Bytes.toBytes(tp._4))
        (rowKey, kv)
      })

    // ---- bulk-loader boilerplate ----
    val conf = HBaseConfiguration.create()
    conf.set("fs.defaultFS", "hdfs://doitedu01:8020")
    conf.set("hbase.zookeeper.quorum", "doitedu01,doitedu02,doitedu03")

    val job = Job.getInstance(conf)

    val tableName = TableName.valueOf("session_agr")
    val conn = ConnectionFactory.createConnection(conf)
    try {
      val table: Table = conn.getTable(tableName)
      try {
        val locator = conn.getRegionLocator(tableName) // region locator for HFile partitioning

        // The key step: configures total-order partitioning, compression, etc.
        // to match the live table's regions.
        HFileOutputFormat2.configureIncrementalLoad(job, table, locator)

        // Write the prepared cells as HFiles (clearing any leftover output first).
        val outPath = "/bulkload/session_agr/2020-10-07"
        val fs = FileSystem.get(conf)
        if (fs.exists(new Path(outPath))) fs.delete(new Path(outPath), true)

        kvData.saveAsNewAPIHadoopFile(outPath, classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], job.getConfiguration)

        // Notify the HBase master and adopt the generated HFiles into the table.
        val loader = new LoadIncrementalHFiles(job.getConfiguration)
        loader.doBulkLoad(new Path(outPath), conn.getAdmin, table, locator)
      } finally {
        table.close() // previously leaked
      }
    } finally {
      conn.close()
    }

    spark.close()
  }

}
