package hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{Partitioner, SparkConf, SparkContext}
import java.security.MessageDigest
import java.text.SimpleDateFormat
import java.util.Date

object BulkLoad3 {
  /**
   * Bulk-loads a Hive table snapshot into HBase: reads rows from Hive,
   * salts the row key with an MD5 prefix, sorts the cells, writes HFiles
   * to HDFS, and loads them into the target table with LoadIncrementalHFiles.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder().enableHiveSupport().
      config(new SparkConf().setAppName("Hive2Hbase")
        // Use Kryo instead of the default Java serialization.
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        // Register the classes Kryo will have to serialize.
        .registerKryoClasses(Array(classOf[ImmutableBytesWritable], classOf[Result]))).getOrCreate()
    println("初始化spark环境成功")


    // HBase configuration.
    val configuration: Configuration = HBaseConfiguration.create()
    val tableName = "TEST:tag_am__realtime_calc"
    configuration.set("hbase.zookeeper.quorum", "hbase00C-test.data-infra.shopee.io,hbase00D-test.data-infra.shopee.io,hbase00E-test.data-infra.shopee.io")
    configuration.set("zookeeper.znode.parent", "/dev-hbase")
    configuration.set("hbase.client.retries.number", "1") // bulkload retry count; the default is 31
    configuration.set("hbase.fs.tmp.dir", "hdfs://R2/user/sztoc_audiencemanager/hbase-staging") // HDFS path for HBase temp files
    configuration.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    val conn: Connection = ConnectionFactory.createConnection(configuration)
    val regionLocator: RegionLocator = conn.getRegionLocator(TableName.valueOf(tableName))
    println("初始化hbase环境成功")

    // Destination for the generated HFiles; timestamped so reruns don't collide.
    val currentTime: String = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date)
    val hfilePath = "hdfs://dev-hbase:8020/user/sztoc_audiencemanager/bulkload/" + currentTime

    // Configure the map-side key/value types and the HFile output format.
    val table: Table = conn.getTable(TableName.valueOf(tableName))
    val admin: Admin = conn.getAdmin
    if (admin.tableExists(TableName.valueOf(tableName))) {
      println("表已经存在，无需创建")
    } else {
      // NOTE(review): the job proceeds even when the table is missing and the
      // bulk load below will then fail; consider creating the table here.
      println("表不存在，需要创建")
    }
    val job: Job = Job.getInstance(configuration)
    job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setMapOutputValueClass(classOf[KeyValue])
    HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)

    // Read the source rows from Hive.
    val df: DataFrame = spark.sql("SELECT email, grass_region AS region,userid AS user_id  FROM " +
      "shopee_reg_mkt_anlys.crm_buyer_lifecycle WHERE crm_buyer_lifecycle.grass_date = date_sub('2022-02-10',2) " +
      "LIMIT 50000000")
    println("数据源读取成功,总量：" + df.count())

    println("变成rdd后的分区数为：" + df.rdd.getNumPartitions)

    // Explode each row into one (saltedRowKey, (family, qualifier, value))
    // tuple per column. The MessageDigest is created once per partition ON THE
    // EXECUTORS: MessageDigest is neither Serializable (capturing a
    // driver-side instance in the closure fails task serialization) nor
    // thread-safe (a single shared instance would corrupt digests).
    val hbaseRowRdd = df.rdd.repartition(200).mapPartitions { rows =>
      val md: MessageDigest = MessageDigest.getInstance("MD5")
      rows.flatMap { row =>
        val email: Any = row(0)
        val region: Any = row(1)
        val user_id: String = row(2).toString
        val saltedRowkey: String = saltRowKey(user_id, md)
        // Ordered output matters for the HFile writer downstream.
        Array(
          (saltedRowkey, ("a", "email", email)),
          (saltedRowkey, ("a", "region", region)),
          (saltedRowkey, ("a", "user_id", user_id)),
        )
      }
    }

    // Sort by (rowKey, qualifier): HFileOutputFormat2 requires cells in order.
    // NOTE(review): sortBy triggers its own shuffle, so the preceding
    // repartitionAndSortWithinPartitions(MyPartitioner(8)) is effectively
    // redundant; kept for parity with the original pipeline.
    val sortedRdd: RDD[(String, (String, String, Any))] = hbaseRowRdd.filter(x => x._1 != null)
      .repartitionAndSortWithinPartitions(MyPartitioner(8)).sortBy(x => (x._1, x._2._2))

    // Convert to the (ImmutableBytesWritable, KeyValue) pairs declared as the
    // map output types above.
    val formattedRDD: RDD[(ImmutableBytesWritable, KeyValue)] = sortedRdd.map(x => {
      val rowkey = Bytes.toBytes(x._1)
      val family = Bytes.toBytes(x._2._1)
      val column = Bytes.toBytes(x._2._2)
      val value = Bytes.toBytes(x._2._3.toString)
      (new ImmutableBytesWritable(rowkey), new KeyValue(rowkey, family, column, value))
    })
    println("数据源etl成功")

    // Persist the RDD as HFiles on HDFS.
    formattedRDD.saveAsNewAPIHadoopFile(hfilePath, classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], configuration)
    println("数据源生成hfile成功")

    // Move the HFiles into the live table. Reuse the Admin obtained above
    // instead of leaking a second one via conn.getAdmin.
    val startTime: Long = new Date().getTime
    val bulkLoader = new LoadIncrementalHFiles(configuration)
    bulkLoader.doBulkLoad(new Path(hfilePath), admin, table, regionLocator)
    println("bulkload成功")
    println("bulkload耗时:" + (new Date().getTime - startTime) / 1000 + "s")

    // Release resources.
    admin.close()
    conn.close()
    spark.close()
    println("spark/hbase资源关闭成功")

  }

  /**
   * Routes a salted row key to a partition based on the first character of
   * its two-char lowercase-hex MD5 salt prefix.
   *
   * Buckets: ['0','2') -> 0, ['2','4') -> 1, ['4','6') -> 2, ['6','8') -> 3,
   * ['8','a') -> 4, ['a','c') -> 5, ['c','e') -> 6, everything else -> 7.
   * Keys are assumed non-null and non-empty (nulls are filtered upstream).
   *
   * NOTE(review): the bucketing is hard-wired to 8 ranges regardless of
   * `regionNums`; passing any other partition count will leave partitions
   * empty or out of range.
   */
  case class MyPartitioner[K, V](regionNums: Int) extends Partitioner {

    override def numPartitions: Int = regionNums

    override def getPartition(key: Any): Int = {
      val saltedRowKeyPrefix = key.toString.charAt(0)
      // Compare against Char literals. The previous Int literals (2, 4, 6, 8)
      // were compared with the character's CODE POINT ('0' == 48), so every
      // digit-prefixed key landed in partition 4 and partitions 0-3 stayed
      // empty, skewing the load badly.
      if (saltedRowKeyPrefix < '2') 0
      else if (saltedRowKeyPrefix < '4') 1
      else if (saltedRowKeyPrefix < '6') 2
      else if (saltedRowKeyPrefix < '8') 3
      else if (saltedRowKeyPrefix < 'a') 4
      else if (saltedRowKeyPrefix < 'c') 5
      else if (saltedRowKeyPrefix < 'e') 6
      else 7
    }

  }

  //  private def saltRowKey(rowkey: String, suffixSize: Int): String = {
  //    val id: Long = rowkey.toLong
  //    // If suffix size is 3, it means we want to take 3 digits, so we mod 1000
  //    // If suffix size is 2, it means we want to take 2 digits, so we mod 100
  //    // If suffix size is 1, it means we want to take 1 digit, so we mod 10
  //    // NOTE. no more than 4 digits for good
  //    val suffix: Long = id % (if (suffixSize == 3) 1000
  //    else if (suffixSize == 2) 100
  //    else 10)
  //    // Pre-append 0 to avoid data skew, if the suffixSize > 1.
  //    var strSuffix: String = String.valueOf(suffix)
  //    strSuffix = (
  //      if (suffixSize == 3) "00"
  //      else if (suffixSize == 2) "0"
  //      else "") + strSuffix
  //    println("Suffix for row key " + rowkey + " is " + strSuffix)
  //    val builder = new StringBuilder
  //    builder.append(strSuffix.substring(strSuffix.length - suffixSize)).append(":").append(rowkey)
  //    // Append the salt first
  //    // Add a delimiter or not it's up to you, better to do so
  //    // Finally, we append the row key
  //    builder.toString
  //  }

  import org.apache.hadoop.hbase.util.Bytes

  import java.math.BigInteger
  import java.security.MessageDigest

  /**
   * Prefixes a row key with a two-character salt derived from its MD5 digest,
   * e.g. "123" -> "20:123", to spread adjacent keys across regions.
   *
   * @param rowkey  the original row key
   * @param message an MD5 MessageDigest; digest() resets the instance, so it
   *                may be reused across calls, but it is NOT thread-safe
   * @return "&lt;2 lowercase hex chars&gt;:&lt;rowkey&gt;"
   */
  private def saltRowKey(rowkey: String, message: MessageDigest): String = {
    val digest: Array[Byte] = message.digest(Bytes.toBytes(rowkey))
    // Zero-pad to the full 32 hex chars: BigInteger.toString(16) drops leading
    // zero nibbles, which previously shifted the salt for roughly 1 in 16 keys
    // (and made it length-dependent), skewing the two-char prefix distribution.
    // %x yields lower case, which is required here.
    val md5: String = String.format("%032x", new BigInteger(1, digest))
    // First 2 hex chars are the salt; the ':' delimiter keeps the original
    // key recoverable. (Per-row println removed: at 50M rows it flooded the
    // executor logs.)
    md5.substring(0, 2).concat(":").concat(rowkey)
  }
}


