package org.example
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HConstants, HTableDescriptor, KeyValue, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, RegionLocator, Result, Table}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableOutputFormat}
import org.apache.hadoop.hbase.spark.{ByteArrayWrapper, FamiliesQualifiersValues, FamilyHFileWriteOptions, HBaseContext}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

import java.util.Properties

object ThinBulkLoad {

  // NOTE(review): hard-coded local paths and plaintext credentials below
  // should be externalized to configuration before any non-local use.
  System.setProperty("java.security.krb5.conf", "/Users/td/plant_code/write2Hbase/spark2hbase/src/main/resources/krb5.conf")

  // Spark configuration: Kryo serialization is required for the HBase
  // writable types shipped through the bulk-load RDD.
  val sparkConf = new SparkConf()
    .setAppName("mysql2hbase")
    .setMaster("local[2]")
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    // Register the HBase types that cross executor boundaries.
    .registerKryoClasses(Array(classOf[ImmutableBytesWritable], classOf[Result]))

  val spark = SparkSession.builder.config(sparkConf).getOrCreate()

  val sc: SparkContext = spark.sparkContext

  // HBase connection info.
  val hbaseZookeeperQuorum = "hdp73,hdp74,hdp75"
  val hbaseZookeeperPort = "2181"

  // Hadoop configuration with Kerberos authentication for HDFS.
  val hadoopConf = new Configuration()
  hadoopConf.set("hadoop.security.authentication", "kerberos")
  UserGroupInformation.setConfiguration(hadoopConf)
  UserGroupInformation.loginUserFromKeytab("hdfs-cluster1@HDP.COM", "/Users/td/plant_code/write2Hbase/spark2hbase/src/main/resources/hdfs.headless.keytab")

  // MySQL (source) connection info.
  val jdbcUrl = "jdbc:mysql://10.57.30.217:3306/test_cdc"
  val jdbcProperties = new Properties()
  jdbcProperties.setProperty("user", "root")
  jdbcProperties.setProperty("password", "123456")
  Class.forName("com.mysql.jdbc.Driver")

  // HBase configuration (Kerberos-secured).
  val hbaseConf = HBaseConfiguration.create()
  hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseZookeeperQuorum)
  hbaseConf.set(HConstants.ZOOKEEPER_CLIENT_PORT, hbaseZookeeperPort)
  hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, "person")
  hbaseConf.set("hadoop.security.authentication", "kerberos")
  UserGroupInformation.setConfiguration(hbaseConf)
  // NOTE(review): this second login REPLACES the HDFS login above — only the
  // most recent loginUserFromKeytab is the current UGI user. Confirm the
  // hbase principal also carries the HDFS permissions the job needs.
  UserGroupInformation.loginUserFromKeytab("hbase/td-cloudstack02@HDP.COM", "/Users/td/plant_code/write2Hbase/spark2hbase/src/main/resources/hbase.service.keytab")

  // HBase/Spark bridge used by hbaseBulkLoadThinRows.
  val hbaseContext = new HBaseContext(sc, hbaseConf)

  // Shared HBase connection, reused by every method below (closed in main).
  val connection = ConnectionFactory.createConnection(hbaseConf)
  val tableName = TableName.valueOf("person_2")

  // Create the target table up-front if it does not exist yet.
  createHTable(tableName, connection)

  // Staging directory for the generated HFiles; removed up-front so a
  // previous run's output cannot be loaded again by mistake.
  val savePath = "hdfs://tdhdfs/hbase-thin"
  delHdfsPath(savePath, spark)

  /**
   * Entry point: reads the MySQL `person` table, writes its rows as HFiles
   * under [[savePath]] via the thin-row bulk-load API, then moves the HFiles
   * into the HBase table. Resources are released even if a stage fails.
   */
  def main(args: Array[String]): Unit = {
    try {
      val mysqlDF = spark.read.jdbc(jdbcUrl, "person", jdbcProperties)
      // All non-key columns, sorted so qualifiers are emitted in order.
      val fields = mysqlDF.columns.filterNot(_ == "id").sorted

      // This import is required to get hbaseBulkLoadThinRows on plain RDDs.
      import org.apache.hadoop.hbase.spark.HBaseRDDFunctions.GenericHBaseRDDFunctions

      mysqlDF.rdd.map { row =>
        val familyQualifiersValues: FamiliesQualifiersValues = new FamiliesQualifiersValues
        // Row key = the `id` column (assumed first and of type Int —
        // TODO confirm against the MySQL schema).
        val rowKey: String = row.getInt(0).toString
        // foreach, not map: the lambda only mutates familyQualifiersValues.
        fields.foreach { field =>
          // Guard against NULL database columns: the original .toString
          // would throw NPE; NULL cells are simply skipped.
          Option(row.getAs[Any](field)).foreach { value =>
            familyQualifiersValues += (Bytes.toBytes("cf"), Bytes.toBytes(field), Bytes.toBytes(value.toString))
          }
        }
        (new ByteArrayWrapper(Bytes.toBytes(rowKey)), familyQualifiersValues)
      }.hbaseBulkLoadThinRows(
        hbaseContext,
        tableName,
        t => t,
        savePath,
        new java.util.HashMap[Array[Byte], FamilyHFileWriteOptions],
        compactionExclude = false,
        HConstants.DEFAULT_MAX_FILE_SIZE)

      loadHFileToHbase()
    } finally {
      // Release the shared HBase connection and the Spark session even on failure.
      connection.close()
      spark.stop()
    }
  }

  /**
   * Creates `tableName` with a single column family `cf` if it does not
   * already exist. The Admin handle is always released.
   *
   * @param tableName  table to create
   * @param connection open HBase connection to obtain the Admin from
   */
  def createHTable(tableName: TableName, connection: Connection): Unit = {
    val admin = connection.getAdmin
    try {
      if (!admin.tableExists(tableName)) {
        val tableDescriptor = new HTableDescriptor(tableName)
        tableDescriptor.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")))
        admin.createTable(tableDescriptor)
      }
    } finally {
      admin.close() // fix: original leaked the Admin handle
    }
  }

  /** Misspelled alias kept for source compatibility; use [[createHTable]]. */
  @deprecated("use createHTable", "1.0")
  def creteHTable(tableName: TableName, connection: Connection): Unit =
    createHTable(tableName, connection)

  /**
   * Recursively deletes `path` from HDFS if it exists.
   *
   * @param path         HDFS path to remove
   * @param sparkSession session whose Hadoop configuration locates the FS
   */
  def delHdfsPath(path: String, sparkSession: SparkSession): Unit = {
    val hdfs = FileSystem.get(sparkSession.sessionState.newHadoopConf())
    val hdfsPath = new Path(path)
    if (hdfs.exists(hdfsPath)) {
      hdfs.delete(hdfsPath, true) // recursive delete
    }
  }

  /**
   * Completes the bulk load: moves the HFiles generated under [[savePath]]
   * into the HBase table's region directories.
   *
   * Fix vs. original: reuses the shared [[connection]] instead of opening a
   * second Connection that was never closed, and releases the Table, Admin
   * and RegionLocator handles in a finally block.
   */
  def loadHFileToHbase(): Unit = {
    val load: LoadIncrementalHFiles = new LoadIncrementalHFiles(hbaseConf)

    val table = connection.getTable(tableName)
    val regionLocator: RegionLocator = connection.getRegionLocator(tableName)
    val admin = connection.getAdmin
    try {
      // Configure an MR job so HFileOutputFormat2 knows the table layout.
      val job = Job.getInstance(hbaseConf)
      job.setJobName("LoadIncrementalHFiles")
      // HFile generation emits (ImmutableBytesWritable, KeyValue) pairs.
      job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
      job.setMapOutputValueClass(classOf[KeyValue])
      HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)

      // Perform the load — essentially a move of the HFiles into HBase.
      load.doBulkLoad(new Path(savePath), admin, table, regionLocator)
    } finally {
      regionLocator.close()
      admin.close()
      table.close()
    }
  }

}
