package cn.doitedu.dataservice

import java.util

import cn.doitedu.dataservice.beans.PvEventBean
import com.google.gson.Gson
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.SparkSession

/**
 * Target table in HBase (pre-split so bulk-loaded HFiles spread across regions):
 * create 'pv_event_detail','f',SPLITS=>['c','g','k','x']
 */
object HiveEventPvDetail2Hbase {

  /**
   * Bulk-loads one day's pv-event detail records from Hive into HBase.
   *
   * Pipeline: read `dwd.event_app_detail_pv` for the given partition date,
   * group events by (guid, sessionid), serialize each session's events into
   * one JSON array, write the cells out as HFiles via HFileOutputFormat2,
   * then hand the HFiles to HBase with LoadIncrementalHFiles.
   *
   * @param args optional; args(0) is the partition date (yyyy-MM-dd).
   *             Defaults to "2020-09-01" for backward compatibility with
   *             the previous hard-coded behavior.
   */
  def main(args: Array[String]): Unit = {

    // Partition date to load; overridable from the command line.
    val dt = if (args.nonEmpty) args(0) else "2020-09-01"

    val spark = SparkSession
      .builder()
      .appName("pv事件明细表导入hbase")
      .master("local")
      .config("spark.serializer", classOf[KryoSerializer].getName)
      .enableHiveSupport()
      .getOrCreate()

    val pvTable = spark.read.table("dwd.event_app_detail_pv").where(s"dt='$dt'")

    // Map each Hive row onto a PvEventBean. Target cell layout:
    //   rowkey = guid + sessionid, family = 'f', qualifier = 'q',
    //   value  = [{pvevent},{pvevent},...] (JSON array for the session)
    val beanRdd = pvTable.rdd.map(row => {
      val bean = new PvEventBean()
      bean.set(
        row.getAs[String]("account"),
        row.getAs[String]("appid"),
        row.getAs[String]("appversion"),
        row.getAs[String]("carrier"),
        row.getAs[String]("deviceid"),
        row.getAs[String]("devicetype"),
        row.getAs[String]("eventid"),
        row.getAs[String]("ip"),
        row.getAs[Double]("latitude"),
        row.getAs[Double]("longitude"),
        row.getAs[String]("nettype"),
        row.getAs[String]("osname"),
        row.getAs[String]("osversion"),
        row.getAs[String]("releasechannel"),
        row.getAs[String]("resolution"),
        row.getAs[String]("sessionid"),
        row.getAs[Long]("ts"),
        row.getAs[String]("utm_campain"),
        row.getAs[String]("refurl"),
        row.getAs[String]("title"),
        row.getAs[String]("pageid"),
        row.getAs[String]("utm_loctype"),
        row.getAs[String]("url"),
        row.getAs[String]("utm_source"),
        row.getAs[String]("province"),
        row.getAs[String]("city"),
        row.getAs[String]("region"),
        row.getAs[Int]("isnew"),
        row.getAs[String]("guid")
      )
      bean
    })

    // Group by (guid, sessionid): all pv events of one user's session are
    // aggregated into a single JSON array, i.e. one HBase cell per session.
    val kvStringRdd = beanRdd.groupBy(bean => (bean.getGuid, bean.getSessionid)).map(tp => {
      val (guid, sessionid) = tp._1
      import scala.collection.JavaConverters._
      val pvLstJava: util.List[PvEventBean] = tp._2.toList.asJava
      val json = new Gson().toJson(pvLstJava)

      // NOTE(review): guid and sessionid are concatenated with no separator,
      // so distinct (guid, sessionid) pairs could collide into one rowkey —
      // confirm key shapes upstream before relying on uniqueness. The format
      // is kept as-is to stay compatible with existing rowkeys in the table.
      (guid + "" + sessionid, "f", "q", json)
    })

    // HFileOutputFormat2 requires cells in total order: rowkey, then family,
    // then qualifier.
    val sortedKvStringRdd = kvStringRdd.sortBy(tp => (tp._1, tp._2, tp._3))

    // Convert to the (ImmutableBytesWritable, KeyValue) pairs expected by the
    // HBase bulkload output format.
    val hbaseKvRdd = sortedKvStringRdd.map(tp => {
      val rowkeyBytes = Bytes.toBytes(tp._1)
      val kv = new KeyValue(rowkeyBytes, Bytes.toBytes(tp._2), Bytes.toBytes(tp._3), Bytes.toBytes(tp._4))
      (new ImmutableBytesWritable(rowkeyBytes), kv)
    })

    val conf = HBaseConfiguration.create()
    conf.set("fs.defaultFS", "hdfs://doitedu01:8020/")
    conf.set("hbase.zookeeper.quorum", "doitedu01,doitedu02,doitedu03")
    val job = Job.getInstance(conf)

    val tableName = TableName.valueOf("pv_event_detail")
    val conn = ConnectionFactory.createConnection(conf)
    try {
      val table = conn.getTable(tableName)
      try {
        val locator = conn.getRegionLocator(tableName)
        HFileOutputFormat2.configureIncrementalLoad(job, table, locator)

        val hfilePath = s"/bulkload/hive_table/pv_event_detail/$dt/"

        // BUGFIX: this write was commented out, so no HFiles were produced
        // and doBulkLoad below had nothing (or stale files) to load. The
        // save step is required for the job to be functional end-to-end.
        hbaseKvRdd.saveAsNewAPIHadoopFile(
          hfilePath,
          classOf[ImmutableBytesWritable],
          classOf[KeyValue],
          classOf[HFileOutputFormat2],
          job.getConfiguration
        )

        // Tell HBase to adopt the generated HFiles into the table's regions
        // and update its metadata.
        val loader = new LoadIncrementalHFiles(job.getConfiguration)
        loader.doBulkLoad(new Path(hfilePath), conn.getAdmin, table, locator)
      } finally {
        // BUGFIX: table and connection were previously leaked.
        table.close()
      }
    } finally {
      conn.close()
    }

    spark.close()
  }

}
