package cn.doitedu.datayi.loader

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.{Row, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-10-11
 * @desc 深似海男人的一个spark作品
 *       用户画像标签表（用户交互行为标签）导入hbase
 */
object UserProfileConsumeTagsLoader {

  /**
   * Bulk-loads one daily partition of the user-profile tag table from Hive into HBase.
   *
   * Pipeline: read the partition → flatten each row into (rowkey, family, qualifier, value)
   * tuples → sort them the way HFiles require → emit (ImmutableBytesWritable, KeyValue)
   * pairs → write HFiles via HFileOutputFormat2 for a later incremental load.
   *
   * @param args optional; args(0) is the partition date (yyyy-MM-dd).
   *             Defaults to "2021-10-11" to preserve the previous hard-coded behavior.
   */
  def main(args: Array[String]): Unit = {

    // Generalized: the date used to be hard-coded in both the SQL and the output path.
    val dt = if (args.nonEmpty) args(0) else "2021-10-11"

    val spark: SparkSession = SparkSession.builder()
      .appName("用户画像标签表（用户消费统计标签）导入hbase")
      .master("local")
      .config("spark.sql.shuffle.partitions", "2")
      .enableHiveSupport()
      .getOrCreate()

    val df = spark.sql(
      s"""
        |
        |select * from dws.mall_usr_prf_act where dt='$dt'
        |
        |""".stripMargin)

    /**
     * Sample of the source table:
     * +----+-------+-------+-------+-------+-------+-------+-------+-------+-------+----------+
     * |guid|tag0001|tag0002|tag0003|tag0004|tag0005|tag0006|tag0007|tag0008|tag0009|dt        |
     * +----+-------+-------+-------+-------+-------+-------+-------+-------+-------+----------+
     * |1   |3      |5      |7      |2      |4      |6      |2      |5      |10     |2021-10-11|
     * |2   |2      |3      |9      |2      |5      |6      |2      |5      |12     |2021-10-11|
     * |3   |1      |4      |7      |2      |6      |8      |2      |5      |14     |2021-10-11|
     * |4   |3      |5      |10     |2      |4      |6      |2      |5      |11     |2021-10-11|
     * +----+-------+-------+-------+-------+-------+-------+-------+-------+-------+----------+
     */

    // HFile cells have a fixed structure:
    //
    //   rowkey, family, qualifier, value
    //   1       f       tag1001    3    => one KeyValue
    //   1       f       tag1002    5    => one KeyValue
    //   1       f       tag1003    7    => one KeyValue
    //
    // So each wide row fans out into one KeyValue per tag column.
    //
    // NOTE(review): the qualifiers written here are "tag1001".."tag1009" while the source
    // columns are tag0001..tag0009 — confirm the naming shift is intentional before relying
    // on these qualifier names downstream.
    //
    // This match is intentionally partial: a row whose columns don't fit
    // (Long, 9 x Int, partition col) fails fast with a scala.MatchError.
    val cells = df.rdd.flatMap({
      case Row(guid: Long, tag1001: Int, tag1002: Int,
      tag1003: Int, tag1004: Int, tag1005: Int,
      tag1006: Int, tag1007: Int, tag1008: Int, tag1009: Int, _)
      =>
        (guid, "f", "tag1001", tag1001) ::
          (guid, "f", "tag1002", tag1002) ::
          (guid, "f", "tag1003", tag1003) ::
          (guid, "f", "tag1004", tag1004) ::
          (guid, "f", "tag1005", tag1005) ::
          (guid, "f", "tag1006", tag1006) ::
          (guid, "f", "tag1007", tag1007) ::
          (guid, "f", "tag1008", tag1008) ::
          (guid, "f", "tag1009", tag1009) :: Nil
    })

    // HFiles must be sorted by (rowkey, family, qualifier).
    // NOTE(review): sorting by the Long guid matches HBase's byte-wise rowkey order only
    // for non-negative guids (Bytes.toBytes(Long) is big-endian two's complement) — confirm
    // guids are never negative.
    val sorted = cells.sortBy(tp => (tp._1, tp._2, tp._3))

    // Finally, shape the data as the MapReduce output pair: (ImmutableBytesWritable, KeyValue).
    val keyValueRdd = sorted.map(tp => {
      val rowKey = new ImmutableBytesWritable(Bytes.toBytes(tp._1))
      val keyValue = new KeyValue(Bytes.toBytes(tp._1), Bytes.toBytes(tp._2), Bytes.toBytes(tp._3), Bytes.toBytes(tp._4))
      (rowKey, keyValue)
    })

    // Build the configuration HFileOutputFormat2 needs
    // (picks up hbase-site.xml, core-site.xml, hdfs-site.xml, ... from the classpath).
    val conf: Configuration = HBaseConfiguration.create()
    val job = Job.getInstance(conf)

    // Connection/Table/RegionLocator are all Closeable — close them even on failure,
    // otherwise each run leaks a ZooKeeper/RPC connection.
    val conn = ConnectionFactory.createConnection(conf)
    try {
      val tableName = TableName.valueOf("mall_user_profile")
      val table = conn.getTable(tableName)
      val locator = conn.getRegionLocator(tableName)
      try {
        // Configure HFileOutputFormat2 (partitioner, compression, target-table layout)
        // from the live table's region boundaries.
        HFileOutputFormat2.configureIncrementalLoad(job, table, locator)

        // Write the RDD out as HFiles, partitioned by date.
        keyValueRdd.saveAsNewAPIHadoopFile(
          s"/user-profile/consume-hfile/$dt/",
          classOf[ImmutableBytesWritable],
          classOf[KeyValue],
          classOf[HFileOutputFormat2],
          job.getConfiguration)
      } finally {
        locator.close()
        table.close()
      }
    } finally {
      conn.close()
    }

    spark.close()
  }
}
