package org.example

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.{CellUtil, HBaseConfiguration, HColumnDescriptor, HConstants, HTableDescriptor, KeyValue, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, HTable, Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

import java.util.Properties

object BulkLoad {

  /** Entry point: reads a MySQL table via Spark JDBC, converts each row into
    * HBase [[KeyValue]]s, writes them as HFiles to HDFS, and bulk-loads the
    * HFiles into HBase with [[LoadIncrementalHFiles]].
    *
    * NOTE(review): keytab paths, JDBC credentials, and hostnames are
    * hard-coded; they should be externalized to configuration before
    * production use.
    */
  def main(args: Array[String]): Unit = {

    // Kerberos client configuration for this JVM.
    System.setProperty("java.security.krb5.conf", "/Users/td/plant_code/write2Hbase/spark2hbase/src/main/resources/krb5.conf")

    // Spark configuration. Kryo serialization is used for the HBase writable
    // types shipped between executors.
    val sparkConf = new SparkConf()
      .setAppName("mysql2hbase")
      .setMaster("local[2]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Register the classes that need Kryo serialization.
      .registerKryoClasses(Array(classOf[ImmutableBytesWritable], classOf[Result]))

    val spark = SparkSession.builder.config(sparkConf).getOrCreate()

    // Ensure the SparkSession is released even if the job below fails.
    try {
      // HBase connection info.
      val hbaseZookeeperQuorum = "hdp73,hdp74,hdp75"
      val hbaseZookeeperPort = "2181"

      // Hadoop configuration including HDFS Kerberos authentication.
      val hadoopConf = new Configuration()
      hadoopConf.set("hadoop.security.authentication", "kerberos")
      UserGroupInformation.setConfiguration(hadoopConf)
      UserGroupInformation.loginUserFromKeytab("hdfs-cluster1@HDP.COM", "/Users/td/plant_code/write2Hbase/spark2hbase/src/main/resources/hdfs.headless.keytab")

      // MySQL source connection info.
      val jdbcUrl = s"jdbc:mysql://10.57.30.217:3306/test_cdc"
      val jdbcProperties = new Properties()
      jdbcProperties.setProperty("user", "root")
      jdbcProperties.setProperty("password", "123456")
      Class.forName("com.mysql.jdbc.Driver")

      val mysqlDF = spark.read.jdbc(jdbcUrl, "test_a", jdbcProperties)

      // HFileOutputFormat2 requires cells sorted by row key AND, within a row,
      // by column qualifier — hence the sorted field list and sortByKey below.
      val fields = mysqlDF.columns.filterNot(_ == "id").sorted

      val hbaseRdd = mysqlDF.rdd.map(row => {
        // Column 0 is assumed to be the integer "id" used as the row key
        // — TODO confirm the JDBC column ordering guarantees this.
        val rowKey = Bytes.toBytes(row.getInt(0))
        val kvs = fields.flatMap { field =>
          // Skip NULL columns: calling .toString on a null value would NPE,
          // and HBase simply omits absent cells.
          Option(row.getAs[Any](field)).map { value =>
            new KeyValue(rowKey, Bytes.toBytes("cf"), Bytes.toBytes(field), Bytes.toBytes(value.toString))
          }
        }

        (new ImmutableBytesWritable(rowKey), kvs)
      }).flatMapValues(x => x).sortByKey()

      // HBase configuration.
      val hbaseConf = HBaseConfiguration.create()
      hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseZookeeperQuorum)
      hbaseConf.set(HConstants.ZOOKEEPER_CLIENT_PORT, hbaseZookeeperPort)

      // Kerberos-related settings. This second keytab login replaces the HDFS
      // principal above for subsequent HBase calls.
      hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, "person")
      hbaseConf.set("hadoop.security.authentication", "kerberos")
      UserGroupInformation.setConfiguration(hbaseConf)
      UserGroupInformation.loginUserFromKeytab("hbase/td-cloudstack02@HDP.COM", "/Users/td/plant_code/write2Hbase/spark2hbase/src/main/resources/hbase.service.keytab")

      val connection = ConnectionFactory.createConnection(hbaseConf)
      val tableName = TableName.valueOf("person2")

      // Create the HBase table if it does not exist yet.
      creteHTable(tableName, connection)
      val table = connection.getTable(tableName)
      val regionLocator = connection.getRegionLocator(tableName)
      val admin = connection.getAdmin

      try {
        val job = Job.getInstance(hbaseConf)

        job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
        job.setMapOutputValueClass(classOf[KeyValue])

        HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)

        val savePath = "hdfs://tdhdfs/hbase2"
        delHdfsPath(savePath, spark)

        // Deprecated key kept for compatibility; Hadoop translates it to
        // mapreduce.output.fileoutputformat.outputdir.
        job.getConfiguration.set("mapred.output.dir", savePath)

        // Write the sorted RDD as HFiles into the HDFS staging directory.
        hbaseRdd.saveAsNewAPIHadoopDataset(job.getConfiguration)

        // Bulk-load the generated HFiles from HDFS into the HBase table.
        val bulkLoader = new LoadIncrementalHFiles(hbaseConf)
        bulkLoader.doBulkLoad(new Path(savePath), admin, table, regionLocator)

      } finally {
        // Close every HBase handle we opened; Admin and RegionLocator were
        // previously leaked.
        admin.close()
        regionLocator.close()
        table.close()
        connection.close()
      }
    } finally {
      spark.close()
    }
  }

  /** Creates the table with a single column family "cf" if it does not exist.
    *
    * NOTE(review): the name is a typo for `createHTable`, but it is public API
    * of this object and is kept for compatibility.
    */
  def creteHTable(tableName: TableName, connection: Connection): Unit = {
    val admin = connection.getAdmin

    try {
      if (!admin.tableExists(tableName)) {
        val tableDescriptor = new HTableDescriptor(tableName)
        tableDescriptor.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")))
        admin.createTable(tableDescriptor)
      }
    } finally {
      // Admin was previously leaked; each getAdmin call must be closed.
      admin.close()
    }
  }

  /** Recursively deletes `path` if it exists, so the HFile staging directory
    * is clean before the job writes to it.
    */
  def delHdfsPath(path: String, sparkSession: SparkSession): Unit = {
    val hdfsPath = new Path(path)
    // Resolve the filesystem from the path itself rather than FileSystem.get:
    // the path carries its own scheme/authority (hdfs://tdhdfs), which may
    // differ from fs.defaultFS.
    val hdfs = hdfsPath.getFileSystem(sparkSession.sessionState.newHadoopConf())

    if (hdfs.exists(hdfsPath)) {
      hdfs.delete(hdfsPath, true)
    }
  }

}
