import cn.doitedu.commons.utils.SparkUtil
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.{CellUtil, HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

/**
 * @Title: DwsPvSe_Hbase
 * @Package cn.doitedu.dws
 * @Description: dws层 流量会话聚合表，导入HBASE提供给上层报表系统查询
 * @Author hunter@doitedu.cn
 * @date 2020/8/19 14:50
 *       操作：
 *   1. 首先要在hbase中创建好表结构
 *       [root@doitedu01 ~]# hbase shell
 *       hbase(main):013:0> create 'dws_pv_se','f'
 *
 *
 */
object DwsPvSe_Hbase {

  /**
   * Bulk-loads pre-generated HFiles from HDFS (`/hfile_tmp/dws_pv_se/`) into the
   * HBase table `dws_pv_se` (family `f`).
   *
   * NOTE(review): the Spark step that generates those HFiles is currently disabled
   * (kept below as a commented-out reference). As written, this job assumes the
   * HFiles already exist at the staging path — running it without first producing
   * them will fail or load nothing. Confirm whether the generation step should be
   * re-enabled before deploying.
   */
  def main(args: Array[String]): Unit = {

    /* ------------------------------------------------------------------
     * DISABLED: HFile generation step.
     * Reads the Hive table test.dws_pv_se, explodes each row into one
     * (rowkey=guid, qualifier, value) triple per column, sorts by
     * (rowkey, qualifier) as HFileOutputFormat2 requires, and writes the
     * result as HFiles to /hfile_tmp/dws_pv_se/.
     * ------------------------------------------------------------------

    val spark = SparkSession.builder()
      .appName("dws流量会话聚合表导入HBASE")
      .config("spark.serializer", classOf[KryoSerializer].getName)
      .enableHiveSupport()
      .master("local[*]")
      .getOrCreate()

    val df = spark.read.table("test.dws_pv_se")

    //|guid|session_id|pv_cnt|device_type|province|city  |region|dtstr     |page_id|duration|dt        |
    val data: RDD[(ImmutableBytesWritable, KeyValue)] = df.rdd.flatMap(row => {
      val guid: String = row.getAs[String]("guid")
      val session_id: String = row.getAs[String]("session_id")
      val pv_cnt: Int = row.getAs[Int]("pv_cnt")
      val device_type: String = row.getAs[String]("device_type")
      val province: String = row.getAs[String]("province")
      val city: String = row.getAs[String]("city")
      val region: String = row.getAs[String]("region")
      val dtstr: String = row.getAs[String]("dtstr")
      val page_id: String = row.getAs[String]("page_id")
      val duration: Long = row.getAs[Long]("duration")

      val lst = new ListBuffer[(String, String, String)]
      lst += ((guid, "session_id", session_id))
      lst += ((guid, "pv_cnt", pv_cnt + ""))
      lst += ((guid, "device_type", device_type))
      lst += ((guid, "province", province))
      lst += ((guid, "city", city))
      lst += ((guid, "region", region))
      lst += ((guid, "dtstr", dtstr))
      lst += ((guid, "page_id", page_id))
      lst += ((guid, "duration", duration + ""))
      lst
    })
      // HFileOutputFormat2 requires cells sorted by rowkey, then qualifier
      .sortBy(tp => (tp._1, tp._2))
      .map(tp => {
        val rowkey = new ImmutableBytesWritable(Bytes.toBytes(tp._1))
        val kv = new KeyValue(Bytes.toBytes(tp._1), Bytes.toBytes("f"), Bytes.toBytes(tp._2), Bytes.toBytes(tp._3))
        (rowkey, kv)
      })

    HFileOutputFormat2.configureIncrementalLoad(job, table, locator)
    data.saveAsNewAPIHadoopFile("/hfile_tmp/dws_pv_se/", classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], job.getConfiguration)
    */

    // HBase/HDFS connection settings for the target cluster.
    val conf = HBaseConfiguration.create()
    conf.set("fs.defaultFS", "hdfs://doitedu01:8020")
    conf.set("hbase.zookeeper.quorum", "doitedu01,doitedu02,doitedu03")
    val job = Job.getInstance(conf)

    val tableName = TableName.valueOf("dws_pv_se")

    // Close every HBase resource even if the bulk load throws;
    // the original code leaked table/locator and skipped conn.close() on failure.
    val conn = ConnectionFactory.createConnection(conf)
    try {
      val table = conn.getTable(tableName)
      val locator = conn.getRegionLocator(tableName)
      try {
        // Move the staged HFiles into the table's regions (bulk load tool).
        new LoadIncrementalHFiles(job.getConfiguration)
          .doBulkLoad(new Path("/hfile_tmp/dws_pv_se/"), conn.getAdmin, table, locator)
      } finally {
        locator.close()
        table.close()
      }
    } finally {
      conn.close()
    }
  }

}
