package com.leal.util

import com.leal.entity.HFilePartitioner
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, Partitioner}

/**
 * @Classname bigdata
 * @Description Helper methods for generating HFiles (for HBase bulk loading)
 * @Date 2023/3/7 14:33
 * @Created by leal
 */
object HFileUtil {

  /**
   * Writes an RDD of (rowKey, jsonValue) pairs to `hFilePath` as HFiles suitable
   * for an HBase bulk load into `tableName`. Each pair becomes a single cell
   * `rowKey -> colFamily:colName = jsonValue`.
   *
   * @param fileSystem    HDFS handle (currently unused here; kept for interface compatibility)
   * @param configuration Hadoop/HBase configuration used for the connection and the job
   * @param rdd           input pairs of (row key, JSON string value)
   * @param hFilePath     output directory for the generated HFiles
   * @param colFamily     target column family
   * @param colName       target column qualifier
   * @param tableName     target HBase table name
   * @param hFileNum      number of partitions / HFiles to produce
   * @param isHex         "1" => partition by the table's region start keys
   *                      (via HFilePartitioner); anything else => plain hash partitioning
   */
  def generateJsonHFile(
                         fileSystem: FileSystem,
                         configuration: Configuration,
                         rdd: RDD[(String, String)],
                         hFilePath: String,
                         colFamily: String,
                         colName: String,
                         tableName: String,
                         hFileNum: Int,
                         isHex: String
                       ): Unit = {

    // Open the HBase connection and the handles the incremental-load setup needs.
    // All three are closed in the finally block below (the original leaked them,
    // and also acquired an Admin that was never used nor closed).
    val connection: Connection = ConnectionFactory.createConnection(configuration)
    val realTable: TableName = TableName.valueOf(tableName)
    val table: Table = connection.getTable(realTable)
    val regionLocator: RegionLocator = connection.getRegionLocator(realTable)

    try {
      // Region-aligned partitioning keeps each output HFile within one region's
      // key range; hash partitioning just balances the hFileNum output files.
      val partitioner: Partitioner =
        if ("1".equals(isHex)) new HFilePartitioner(regionLocator.getStartKeys, hFileNum)
        else new HashPartitioner(hFileNum)

      // set job config
      configuration.set(TableOutputFormat.OUTPUT_TABLE, tableName)
      val job: Job = Job.getInstance(configuration)
      job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
      job.setMapOutputValueClass(classOf[KeyValue])
      job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

      // Configures compression, bloom filters and the total-order partitioner
      // metadata from the live table / region layout.
      HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)

      // NOTE(review): repartitionAndSortWithinPartitions sorts by the String key;
      // HFileOutputFormat2 requires byte-lexicographic order. These agree for
      // ASCII keys — confirm keys are ASCII-safe if multi-byte keys are possible.
      rdd.repartitionAndSortWithinPartitions(partitioner).map { case (rowKey, json) =>
        val rowBytes = Bytes.toBytes(rowKey)
        // Fix: populate the output key with the row bytes. The original emitted an
        // empty ImmutableBytesWritable, leaving the key blank for any consumer
        // (e.g. multi-table output or downstream inspection) that reads it.
        (new ImmutableBytesWritable(rowBytes), new KeyValue(
          rowBytes,
          Bytes.toBytes(colFamily),
          Bytes.toBytes(colName),
          Bytes.toBytes(json)
        ))
      }.saveAsNewAPIHadoopFile(
        hFilePath,
        classOf[ImmutableBytesWritable],
        classOf[KeyValue],
        classOf[HFileOutputFormat2],
        job.getConfiguration
      )
    } finally {
      // Close in reverse acquisition order; closing the connection last.
      regionLocator.close()
      table.close()
      connection.close()
    }
  }

}
