package cn.doitedu.profile.tagexport

import org.apache.commons.codec.digest.DigestUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory, RegionLocator, Table, TableDescriptor, TableDescriptorBuilder}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, TableOutputFormat}
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}


/**
  * @author: 余辉
  * @blog: https://blog.csdn.net/silentwolfyh
  * @create: 2019/10/19
  * @description:
  * 1. Bulkload program that writes the profile id -> gid index data into HBase.
  * 2. Prerequisite — create the target table first: hbase> create 'IDX_PROFILE_ID_GID','f'
  * 3. Prerequisite — delete the staging directory /tmp/idx on HDFS before each run.
  **/

object ProfileIndex2Hbase {

  /**
    * Bulk-loads the profile id -> gid index into HBase.
    *
    * Pipeline:
    *   1. Read the merged tag parquet data and keep only rows with tag_module = 'M000'.
    *   2. Transform each row into a sorted (rowkey, KeyValue) pair as required by
    *      HFileOutputFormat2 (sorted by rowkey, then family, then qualifier).
    *   3. Write the HFiles to HDFS, then load them into the target table with
    *      LoadIncrementalHFiles.doBulkLoad.
    *
    * Prerequisites: the table exists (hbase> create 'IDX_PROFILE_ID_GID','f') and
    * the staging path /tmp/idx has been removed from HDFS.
    */
  def main(args: Array[String]): Unit = {

    // Shared literals — table name and HFile staging path are used in several places below.
    val tableName = "IDX_PROFILE_ID_GID"
    val hfilePath = "hdfs://hadoop11:9000/tmp/idx"

    // 1. Local-mode Spark session; Kryo serialization is required for the HBase writables.
    val spark: SparkSession = SparkSession.builder().appName("ProfileTags2Hbase")
      .master("local")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // 2. Load the merged tag detail data and keep only the M000 tag module.
    val frame: DataFrame = spark.read.parquet("user_profile/data/output/tag_merge/day02")
    val filterData: DataFrame = frame.where(" tag_module ='M000' ").select("gid", "tag_module", "tag_value")

    // Debug output only: println executes on the executors, so in cluster mode the
    // rows land in executor logs rather than the driver console.
    filterData.rdd.foreach(println)

    /**
      * 3. Arrange the data as (k, v) tuples:
      *    k (the HBase rowkey) is an ImmutableBytesWritable,
      *    v (one qualifier+value cell) is a KeyValue.
      */
    val keyValues: RDD[(ImmutableBytesWritable, KeyValue)] = filterData.rdd.map(row => {
      val gid: Long = row.getAs[Long]("gid")
      // NOTE(review): the date suffix is hard-coded; confirm whether it should be the run date.
      val gidStr: String = DigestUtils.md5Hex(gid.toString).substring(0, 10) + "2019-11-19"
      val tag_value: String = row.getAs[String]("tag_value")
      // This is an id -> gid index, so tag_value (the id) becomes the rowkey
      // and the salted/hashed gid string becomes the cell value.
      (tag_value, gidStr)
    })
      // 3-2. HFileOutputFormat2 requires input sorted by rowkey (then family/qualifier);
      // with a single fixed family/qualifier, sorting by (rowkey, value) suffices.
      .sortBy(tp => (tp._1, tp._2))
      .map(tp => {
        // 3-3. Use Bytes.toBytes everywhere (instead of String.getBytes) so the byte
        // encoding is always UTF-8 and never depends on the JVM default charset.
        val rowkeyBytes = Bytes.toBytes(tp._1)
        val kv = new KeyValue(rowkeyBytes, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(tp._2))
        // 3-4. rowkey wrapped as ImmutableBytesWritable.
        (new ImmutableBytesWritable(rowkeyBytes), kv)
      })

    // 4. HBase configuration: ZooKeeper quorum, output table, and HDFS default FS.
    val conf: Configuration = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", "hadoop11:2181,hadoop12:2181,hadoop13:2181")
    conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    conf.set("fs.defaultFS", "hdfs://hadoop11:9000/")

    // 5. MapReduce job carrying the map-output types: rowkey class and cell class.
    val job: Job = Job.getInstance(conf)
    job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setMapOutputValueClass(classOf[KeyValue])

    // 6. Table descriptor used by HFileOutputFormat2 to lay out the HFiles.
    val tableDesc: TableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).build()
    HFileOutputFormat2.configureIncrementalLoadMap(job, tableDesc)

    // 7. Persist the sorted RDD[(K, V)] as HFiles on HDFS via HFileOutputFormat2.
    keyValues.saveAsNewAPIHadoopFile(hfilePath,
      classOf[ImmutableBytesWritable],
      classOf[KeyValue],
      classOf[HFileOutputFormat2],
      job.getConfiguration
    )

    // 8. Spark is no longer needed; the bulk load below uses the HBase client API directly.
    spark.close()

    println("hfile 文件 生成完毕 -----------------------")

    // 9. Load the generated HFiles into HBase with LoadIncrementalHFiles.doBulkLoad.
    //    Close all client resources in finally blocks so they are released even when
    //    the bulk load throws (the original version leaked the connection/table handles).
    val conn: Connection = ConnectionFactory.createConnection(conf)
    try {
      val admin: Admin = conn.getAdmin
      val table: Table = conn.getTable(TableName.valueOf(tableName))
      val locator: RegionLocator = conn.getRegionLocator(TableName.valueOf(tableName))
      try {
        val loader = new LoadIncrementalHFiles(conf)
        loader.doBulkLoad(new Path(hfilePath), admin, table, locator)
      } finally {
        locator.close()
        table.close()
        admin.close()
      }
    } finally {
      conn.close()
    }

    println("恭喜你，hfile数据导入完成，你可以去hbase上查询数据了 -----------------------")
  }
}
