package cn.doitedu.dw_export

import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, RegionLocator, Table}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2
import org.apache.hadoop.hbase.tool.BulkLoadHFiles
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, Row, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-12-25
 * @desc 画像数据导入hbase表
 */
object ProfileTagsLoader {

  /**
   * Reads the profile-tag "tall/narrow" table (guid, tag_id, tag_value) from Hive,
   * writes it out as sorted HFiles via HFileOutputFormat2, and bulk-loads the
   * HFiles into the HBase table `profile_tags`.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .appName("画像标签数据批量导入hbase")
      .master("local")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .enableHiveSupport()
      .getOrCreate()

    // Read the prepared tag table from Hive: each row => guid, tag_id, tag_value
    val tags: Dataset[Row] = spark.read.table("dws.profile_tags_int").where("dt='2021-12-24'")

    // Column family of the target HBase table.
    // BUGFIX: this must be a String, not the Char 'f'. Bytes.toBytes has no Char
    // overload, so Bytes.toBytes('f') silently widens Char -> Int and emits the
    // 4-byte big-endian encoding of 102 instead of the single byte "f", producing
    // HFiles whose family does not match the table and breaking the bulk load.
    val family: String = "f"

    // Directory on HDFS where the intermediate HFiles are written, then loaded from.
    val hfileDir = "/tags_bulkload/tags01/2021-12-24/"

    // Shape the rows into RDD[(ImmutableBytesWritable, KeyValue)] as required by HFileOutputFormat2
    val tagsRdd: RDD[(ImmutableBytesWritable, KeyValue)] = tags.rdd
      .map(row => {
        val guid: Long = row.getAs[Long]("guid")
        val tagId: String = row.getAs[String]("tag_id")
        val tagValue: Int = row.getAs[Int]("tag_value")
        (guid, family, tagId, tagValue)
      })
      // HFile cells must be globally sorted: rowkey first, then family, then qualifier.
      // NOTE(review): HBase compares rowkeys as unsigned bytes; sorting by the signed
      // Long here matches that order only while guids are non-negative — confirm upstream.
      .sortBy(tp => (tp._1, tp._2, tp._3))
      // Convert each sorted tuple into the big-KV pair (ImmutableBytesWritable, KeyValue)
      .map(tp => {
        val rowkey = Bytes.toBytes(tp._1)
        (new ImmutableBytesWritable(rowkey),
          new KeyValue(rowkey, Bytes.toBytes(tp._2), Bytes.toBytes(tp._3), Bytes.toBytes(tp._4)))
      })

    val conf = HBaseConfiguration.create()
    conf.set("fs.defaultFS", "hdfs://doit01:8020/")
    conf.set("hbase.zookeeper.quorum", "doit01:2181,doit02:2181,doit03:2181")
    val job: Job = Job.getInstance(conf)

    // HBase client objects are needed only so configureIncrementalLoad can read the
    // table schema and region boundaries (for the total-order partitioner).
    val conn: Connection = ConnectionFactory.createConnection(conf)
    try {
      val tableName: TableName = TableName.valueOf("profile_tags")
      val table: Table = conn.getTable(tableName)
      try {
        val locator: RegionLocator = conn.getRegionLocator(tableName)

        // Configure HFileOutputFormat2 (compression, bloom filters, region split points)
        HFileOutputFormat2.configureIncrementalLoad(job, table, locator)

        // Write the sorted KVs out as HFiles
        tagsRdd.saveAsNewAPIHadoopFile(
          hfileDir,
          classOf[ImmutableBytesWritable],
          classOf[KeyValue],
          classOf[HFileOutputFormat2],
          job.getConfiguration)

        // Hand the generated HFiles over to the region servers
        val bulkLoadHFiles: BulkLoadHFiles = BulkLoadHFiles.create(job.getConfiguration)
        bulkLoadHFiles.bulkLoad(tableName, new Path(hfileDir))
      } finally {
        // Close resources even if the export fails part-way (original leaked on error).
        table.close()
      }
    } finally {
      conn.close()
      spark.close()
    }
  }
}
