package com.leal.hbase

import com.leal.util.SparkUtil
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory, RegionLocator, Table}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, KeyValue, TableName}
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles}
import org.apache.hadoop.hbase.util.{Bytes, MD5Hash}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import java.net.URL

/**
 * HBase bulk-load example: reads a Hive table via Spark, writes sorted HFiles
 * to HDFS with HFileOutputFormat2, then hands them to the region servers with
 * LoadIncrementalHFiles.
 *
 * @author leal
 * @since 2023/3/7
 */
object HBaseBulkLoader {

  def main(args: Array[String]): Unit = {
    // ---- job parameters ----
    val zooKeeper = "127.0.0.1"
    val hbaseName: String = "student_info_test_bulkload"
    // Row-key column of the source table. NOTE(review): the column family is
    // hard-coded to "hfile-fy" below, matching creteHTable; the old unused
    // local `colName = "info"` has been removed to avoid confusion.
    val colFields = "id"
    // [Done] switched the output path from local FS to HDFS:
    //   hbaseConf.set("fs.defaultFS", "hdfs://127.0.0.1:9000/")
    // [TODO] org.apache.hadoop.hdfs.client.HdfsDataInputStream.getReadStatistics()Lorg/apache/hadoop/hdfs/DFSInputStream$ReadStatistics;
    //   https://juejin.cn/post/7114541251784867853 — hbase-server jar conflict;
    //   a compatible dependency version still needs to be found.
    val savePath: String = "/tmp/bulkload/" + hbaseName

    // ---- read the source data from Hive ----
    val spark: SparkSession = SparkUtil.initSpark(mode = "NODE_HIVE_MAC", enableHive = true)
    val sql = "select * from leal_test.student_info;"
    val frame: DataFrame = SparkUtil.getDataFrameBySql(spark, sql)
    // frame.printSchema()
    frame.show()

    // ---- HBase configuration ----
    val hbaseConf: Configuration = HBaseConfiguration.create(spark.sessionState.newHadoopConf())
    hbaseConf.set("hbase.zookeeper.quorum", zooKeeper)
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("zookeeper.znode.parent", "/hbase")
    hbaseConf.set("hbase.master", "127.0.0.1:60010")
    hbaseConf.set("fs.defaultFS", "hdfs://127.0.0.1:9000/")
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, hbaseName)
    // Staging dir must live on HDFS, otherwise the final bulk-load move fails.
    hbaseConf.set("hbase.fs.tmp.dir", "hdfs://127.0.0.1:9000/tmp/hbase-staging")

    // All non-rowkey columns, sorted: within one row the KeyValues must be
    // written in family/qualifier order for HFileOutputFormat2.
    val fields: Array[String] = frame.columns.filterNot((_: String) == colFields).sorted
    val data: RDD[(ImmutableBytesWritable, KeyValue)] = frame.rdd
      .map { row: Row =>
        // cleanString maps null cells to "", so no NPE on .toString for
        // either the row key or the value columns (fix: the old code called
        // row.getAs(...).toString directly and crashed on nulls).
        val rowkey: Array[Byte] = Bytes.toBytes(cleanString(row, colFields))
        val keyValues: Array[KeyValue] = fields.map { field: String =>
          new KeyValue(rowkey, Bytes.toBytes("hfile-fy"), Bytes.toBytes(field), Bytes.toBytes(cleanString(row, field)))
        }
        (new ImmutableBytesWritable(rowkey), keyValues)
      }
      .flatMapValues((x: Array[KeyValue]) => x)
      .sortByKey() // HFiles require total row-key ordering

    // ---- HBase connection / bulk load ----
    val connection: Connection = ConnectionFactory.createConnection(hbaseConf)
    val tableName: TableName = TableName.valueOf(hbaseName)

    // Create the target table when it does not exist yet.
    creteHTable(tableName, connection)

    val table: Table = connection.getTable(tableName)
    val admin: Admin = connection.getAdmin // fix: was obtained inline and never closed

    try {
      val regionLocator: RegionLocator = connection.getRegionLocator(tableName)

      val job: Job = Job.getInstance(hbaseConf)
      job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
      job.setMapOutputValueClass(classOf[KeyValue])
      HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)

      // Write the HFiles to a fresh output directory...
      delHdfsPath(savePath, spark)
      job.getConfiguration.set("mapred.output.dir", savePath)
      data.saveAsNewAPIHadoopDataset(job.getConfiguration)

      // ...then hand them over to the region servers.
      val bulkLoader = new LoadIncrementalHFiles(hbaseConf)
      bulkLoader.doBulkLoad(new Path(savePath), admin, table, regionLocator)

    } finally {
      // WARN "Skipping non-directory .../_SUCCESS" from LoadIncrementalHFiles
      // is harmless: the data files were already moved into HBase's HDFS dir.
      admin.close()
      table.close()
      connection.close()
    }

    spark.stop()
  }

  /**
   * Creates the target HBase table with a single column family "hfile-fy"
   * when it does not already exist. (Name kept as-is for existing callers,
   * typo and all.)
   *
   * Fix: the Admin handle was previously never closed (resource leak).
   *
   * @param tableName  table to create
   * @param connection open HBase connection (not closed here; owned by caller)
   */
  def creteHTable(tableName: TableName, connection: Connection): Unit = {
    val admin: Admin = connection.getAdmin
    try {
      if (!admin.tableExists(tableName)) {
        val tableDescriptor = new HTableDescriptor(tableName)
        tableDescriptor.addFamily(new HColumnDescriptor(Bytes.toBytes("hfile-fy")))
        admin.createTable(tableDescriptor)
      }
    } finally {
      admin.close()
    }
  }

  /**
   * Recursively deletes `path` on the default FileSystem of the given Spark
   * session's Hadoop configuration, if it exists. No-op otherwise.
   *
   * The FileSystem instance returned by `FileSystem.get` is cached and shared,
   * so it is deliberately not closed here.
   */
  def delHdfsPath(path: String, sparkSession: SparkSession): Unit = {
    val target = new Path(path)
    val fs: FileSystem = FileSystem.get(sparkSession.sessionState.newHadoopConf())

    if (fs.exists(target)) {
      println("输出路径存在，需删除")
      fs.delete(target, true)
    }
  }

  /** Returns the string form of column `col` in `row`, or "" when the cell is null. */
  private def cleanString(row: Row, col: String): String = {
    val idx = row.schema.fieldIndex(col)
    if (row.isNullAt(idx)) {
      ""
    } else {
      row.getAs[Any](col).toString
    }
  }

  /**
   * Builds a row key by joining the values of the comma-separated `colFields`
   * columns with "_" (null cells become ""), optionally MD5-hexed.
   *
   * Fix: the original appended all values into a StringBuilder and then called
   * `mkString("_")` on it — Scala's StringBuilder is an IndexedSeq[Char], so
   * that joined every *character* with "_" (e.g. "123" -> "1_2_3") instead of
   * joining the field values.
   *
   * @param row       source row
   * @param colFields comma-separated list of column names forming the key
   * @param isHex     when true, return the MD5 hex digest of the joined key
   */
  def getRowKey(row: Row, colFields: String, isHex: Boolean): String = {
    val result: String = colFields.split(',').map(cleanString(row, _)).mkString("_")
    if (isHex) MD5Hash.getMD5AsHex(Bytes.toBytes(result)) else result
  }
}
