package hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory, RegionLocator, Result, Table}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.security.MessageDigest
import java.text.SimpleDateFormat
import java.util.Date

object BulkLoad {

  /**
   * Reads user rows from a Hive table, converts them into globally sorted
   * HFiles on HDFS, and bulk-loads those HFiles into the target HBase table.
   *
   * Fixes over the previous revision:
   *  - `table` and `admin` were never closed, and the bulk-load step opened a
   *    *second* Admin via `conn.getAdmin` that was also leaked; the existing
   *    `admin` is now reused and both are closed in a `finally` block;
   *  - `conn` / `spark` were only closed on the success path — a failure
   *    mid-job leaked the HBase connection and the Spark session; resource
   *    release now happens in `finally`.
   */
  def main(args: Array[String]): Unit = {

    // Spark session with Kryo serialization (faster than the default Java
    // serialization); HBase classes crossing the shuffle must be registered.
    val spark: SparkSession = SparkSession.builder().enableHiveSupport().
      config(new SparkConf().setAppName("Hive2Hbase")
        // use Kryo instead of the default Java serialization
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        // classes that need Kryo registration
        .registerKryoClasses(Array(classOf[ImmutableBytesWritable], classOf[Result]))).getOrCreate()
    println("初始化spark环境成功")

    // ---- HBase configuration ---------------------------------------------
    val configuration: Configuration = HBaseConfiguration.create()
    val tableName = "TEST:tag_am__realtime_calc"
    //val tableName = "TEST:monitor_am__task_info"
    configuration.set("hbase.zookeeper.quorum", "hbase00C-test.data-infra.shopee.io,hbase00D-test.data-infra.shopee.io,hbase00E-test.data-infra.shopee.io")
    configuration.set("zookeeper.znode.parent", "/dev-hbase")
    configuration.set("hbase.client.retries.number", "1") // bulkload retry count (default is 31)
    configuration.set("hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily", "3200") // max HFiles per region per family
    configuration.set("hbase.hregion.max.filesize", "10737418240") // region split threshold: 10 GiB (old comment claimed "32T" — the value is 10 GiB)
    //configuration.set("hbase.fs.tmp.dir", "hdfs://dev-hbase/user/sztoc_audiencemanager/hbase-staging") // HBase staging dir on HDFS
    configuration.set("hbase.fs.tmp.dir", "hdfs://R2/user/sztoc_audiencemanager/hbase-staging") // HBase staging dir on HDFS
    configuration.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    val conn: Connection = ConnectionFactory.createConnection(configuration)
    println("初始化hbase环境成功")

    try {
      val regionLocator: RegionLocator = conn.getRegionLocator(TableName.valueOf(tableName))

      // Destination path for the generated HFiles, unique per run.
      val currentTime: String = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date)
      //val hfilePath = "hdfs://R2:/user/huanqing.cheng/hbase/" + currentTime
      val hfilePath = "hdfs://dev-hbase:8020/user/sztoc_audiencemanager/bulkload/" + currentTime

      val table: Table = conn.getTable(TableName.valueOf(tableName))
      val admin: Admin = conn.getAdmin
      try {
        // NOTE: this only reports existence; as before, the job proceeds either
        // way and will fail later if the table is really missing.
        if (admin.tableExists(TableName.valueOf(tableName))) {
          println("表已经存在，无需创建")
        } else {
          println("表不存在，需要创建")
        }

        // Configure the map-side key/value types and the HFile output format.
        val job: Job = Job.getInstance(configuration)
        job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
        job.setMapOutputValueClass(classOf[KeyValue])
        HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)

        // ---- Source data -------------------------------------------------
        val df: DataFrame = spark.sql("SELECT userid AS user_id, grass_region AS region, email FROM " +
          "shopee_reg_mkt_anlys.crm_buyer_lifecycle WHERE crm_buyer_lifecycle.grass_date = date_sub('2022-02-13',2) " +
          "LIMIT 50000000")
        println("数据源读取成功,总量：" + df.count())

        // Explode each source row into one (rowkey, (family, qualifier, value))
        // tuple per column; rowkey is the MD5 hex digest of user_id.
        val hbaseRowRdd = df.rdd.repartition(300).flatMap(rows => {
          val user_id: String = rows(0).toString
          val rowkey: String = MessageDigest.getInstance("MD5").digest(user_id.getBytes).map("%02x".format(_)).mkString
          val region: Any = rows(1)
          val email: Any = rows(2)
          Array((rowkey, ("a", "user_id", user_id)),
            (rowkey, ("a", "region", region)),
            (rowkey, ("a", "email", email)),
          )
        }).filter(x => x._1 != null)

        // HFiles require globally sorted (rowkey, family, qualifier) order,
        // hence the single partition + sort before writing.
        val rdd: RDD[(ImmutableBytesWritable, KeyValue)] = hbaseRowRdd.repartition(1)
          .sortBy(x => (x._1, x._2._1, x._2._2)).map(x => {
          // Convert to bytes and wrap in the key/value types configured above.
          val rowkey = Bytes.toBytes(x._1)
          val family = Bytes.toBytes(x._2._1)
          val column = Bytes.toBytes(x._2._2)
          val value = Bytes.toBytes(x._2._3.toString)
          (new ImmutableBytesWritable(rowkey), new KeyValue(rowkey, family, column, value))
        })
        println("数据源etl成功")

        println("生成hfile的并行度：" + rdd.getNumPartitions)
        // Persist the sorted RDD as HFiles on HDFS.
        rdd.saveAsNewAPIHadoopFile(hfilePath, classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], configuration)
        println("数据源生成hfile成功")

        // ---- Bulk load ---------------------------------------------------
        val startTime: Long = new Date().getTime
        val bulkLoader = new LoadIncrementalHFiles(configuration)
        // Reuse the existing Admin; the old code called conn.getAdmin here,
        // leaking a second Admin instance.
        bulkLoader.doBulkLoad(new Path(hfilePath), admin, table, regionLocator)
        println("bulkload成功")
        println("bulkload耗时:" + (new Date().getTime - startTime) / 1000 + "s")
      } finally {
        // Close per-table handles even if the job failed mid-way.
        admin.close()
        table.close()
      }
    } finally {
      // Always release the connection and the Spark session.
      conn.close()
      spark.close()
      println("spark/hbase资源关闭成功")
    }

  }

}
