package com.gitee.dufafei.spark.connector.hbase

import com.gitee.dufafei.spark.pattern.Optional
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.client.{Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

import scala.collection.mutable.ListBuffer

/**
 * Spark &lt;-&gt; HBase I/O helpers: table scans as RDDs, MR-style writes, and
 * HFile generation / bulk loading.
 *
 * @param client wrapper providing the HBase connection and resource-scoped helpers
 */
class HBaseIo(client: HBaseClient) {

  import com.gitee.dufafei.spark.connector.hdfs.HdfsClient.implicits._

  /**
   * Decodes a Base64 string (as produced by [[convertScanToString]]) back into a Scan.
   *
   * @param base64 Base64-encoded protobuf representation of a Scan
   * @return the deserialized Scan
   */
  def convertStringToScan(base64: String): Scan = {
    val decoded = Base64.decode(base64)
    val scan = ClientProtos.Scan.parseFrom(decoded)
    ProtobufUtil.toScan(scan)
  }

  /**
   * Serializes a Scan into the Base64 protobuf string format that
   * `TableInputFormat.SCAN` expects in the job configuration.
   *
   * @param scan the Scan to serialize
   * @return Base64-encoded protobuf representation of the Scan
   */
  def convertScanToString(scan: Scan): String = {
    val pScan = ProtobufUtil.toScan(scan)
    val bytes = pScan.toByteArray
    Base64.encodeBytes(bytes)
  }

  /**
   * Reads an HBase table as an RDD of (row key, Result) pairs.
   *
   * @param sc SparkContext
   * @param tableName table to read
   * @param scan optional Scan used to restrict rows/columns
   * @return RDD of (row key, Result) pairs
   */
  def readTable(sc: SparkContext, tableName: String, scan: Option[Scan] = None): RDD[(ImmutableBytesWritable,Result)] = {
    // Work on a copy: setting INPUT_TABLE / SCAN directly on the connection's
    // Configuration would leak per-read state into every later use of the connection.
    val conf = new Configuration(client.conn.getConfiguration)
    conf.set(TableInputFormat.INPUT_TABLE, tableName)
    Optional(scan).ifPresent( x => conf.set(TableInputFormat.SCAN, convertScanToString(x)))
    sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
  }

  /** Extracts the row key of a scanned row as a UTF-8 string. */
  def getRowKey(row: (ImmutableBytesWritable, Result)): String = Bytes.toString(row._1.get)

  /**
   * Extracts a single cell value as a UTF-8 string.
   *
   * NOTE(review): when the cell is absent, `Result.getValue` yields null and this
   * returns null — callers must guard. Consider wrapping in Option at call sites.
   */
  def getColumn(row: (ImmutableBytesWritable, Result), family: String, qualifier: String): String =
    Bytes.toString(row._2.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier)))

  /**
   * Writes an RDD of Puts into a table through `TableOutputFormat`.
   *
   * @param rdd (row key, Put) pairs to write
   * @param tableName destination table
   */
  def saveAsMR(rdd: RDD[(ImmutableBytesWritable, Put)], tableName: String): Unit = {
    // Copy the conf so OUTPUT_TABLE does not leak into the shared connection config.
    val conf = new Configuration(client.conn.getConfiguration)
    conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    val job = Job.getInstance(conf)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // Bug fix: the output value is the Put (a Mutation) being written —
    // `Result` is a read-side type and was wrong here.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

  /**
   * Expands each (row key, KeyValue list) pair into individual KeyValues and
   * writes them as HFiles under `tmpPath`, ready for bulk loading.
   *
   * @param rdd (row key, KeyValue list) pairs
   * @param tmpPath output directory for the generated HFiles; must not already
   *                exist — it is created automatically by the output format
   */
  def saveHFiles(rdd: RDD[(ImmutableBytesWritable, ListBuffer[KeyValue])], tmpPath: String): Unit = {
    val conf = client.conn.getConfiguration
    rdd.flatMapValues(x => x.iterator)
      // HFileOutputFormat2 requires its input globally sorted by row key.
      // NOTE(review): this sorts by row key only; with multiple KeyValues per row,
      // intra-row (family/qualifier) ordering is assumed correct upstream — confirm.
      .sortBy(x => x._1, ascending = true)
      .saveAsNewAPIHadoopFile(
        tmpPath,
        classOf[ImmutableBytesWritable],
        classOf[KeyValue],
        classOf[HFileOutputFormat2],
        conf
      )
  }

  /**
   * Bulk-loads HFiles previously written by [[saveHFiles]] into a table.
   *
   * @param tableName destination table
   * @param tmpPath directory containing the HFiles (implicitly converted to a
   *                Hadoop Path by the HdfsClient implicits imported above)
   */
  def loadHFiles(tableName: String, tmpPath: String): Unit = {
    val conf = client.conn.getConfiguration
    client.usingAdmin{ admin =>
      client.usingTable(tableName) { table =>
        client.usingRegion(tableName) { region =>
          val loader = new LoadIncrementalHFiles(conf)
          loader.doBulkLoad(tmpPath, admin, table, region)
        }
      }
    }
  }
}
