package com.spark.util.core

import com.spark.util.utils.HBaseUtil
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.{Connection, Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ListBuffer

/**
 * HBase读写支持
 */
trait HBaseIOSupport {

  /** ZooKeeper quorum host list (written into HConstants.ZOOKEEPER_QUORUM). */
  val quorum: String
  /** ZooKeeper client port, as a string (HConstants.ZOOKEEPER_CLIENT_PORT). */
  val port: String
  /** ZooKeeper znode parent of the HBase cluster (HConstants.ZOOKEEPER_ZNODE_PARENT). */
  val parent: String
  /** Shared HBase connection owned by the implementor; this trait never closes it. */
  val conn: Connection

  /** Builds a fresh Hadoop Configuration pointing at the configured HBase cluster. */
  def getHBaseConf: Configuration = {
    val conf = HBaseConfiguration.create()
    conf.set(HConstants.ZOOKEEPER_QUORUM, quorum)
    conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, port)
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parent)
    conf
  }

  /**
   * Reads an HBase table as an RDD of (rowkey, Result) via TableInputFormat.
   *
   * @param sc           active SparkContext
   * @param tableNameStr fully-qualified table name
   * @param scan         optional Scan restricting rows/columns; full table scan when None
   *
   * NOTE(review): the K,V type parameters are unused; kept only so call sites
   * that pass them explicitly keep compiling.
   */
  def readTable[K,V](sc:SparkContext, tableNameStr:String, scan:Option[Scan]): RDD[(ImmutableBytesWritable,Result)] = {
    val conf = getHBaseConf
    conf.set(TableInputFormat.INPUT_TABLE, tableNameStr)
    // TableInputFormat expects the Scan serialized to a string property.
    scan.foreach { s =>
      conf.set(TableInputFormat.SCAN, HBaseUtil.convertScanToString(s))
    }
    sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
  }

  /**
   * Writes one partition's Puts in a single batched call
   * (intended for use inside rdd.foreachPartition).
   * The Table handle is closed even if the put fails.
   */
  def saveAsBatchPut(iterator:Iterator[Put], conn:Connection, tableNameStr:String): Unit = {
    // Explicit JavaConverters replaces the deprecated implicit JavaConversions.
    import scala.collection.JavaConverters._
    val table = conn.getTable(TableName.valueOf(tableNameStr))
    try {
      table.put(iterator.toList.asJava)
    } finally {
      table.close()
    }
  }

  /** Writes an RDD of (rowkey, Put) to HBase through TableOutputFormat. */
  def saveAsMR(rdd:RDD[(ImmutableBytesWritable, Put)], tableNameStr:String): Unit = {
    val conf = getHBaseConf
    conf.set(TableOutputFormat.OUTPUT_TABLE, tableNameStr)
    val job = Job.getInstance(conf)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // BUG FIX: the output values are Puts, not Results — the declared value
    // class must match what the RDD actually emits to TableOutputFormat.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }

  /**
   * Bulk-loads an RDD of KeyValues into HBase via HFiles
   * (one column family; each rowkey may carry multiple columns).
   *
   * HFileOutputFormat2 requires the keys in total order across the whole
   * output, hence the flatMap + global sortBy before writing.
   *
   * @param rdd          (rowkey, KeyValues-for-that-row) pairs
   * @param tmpPath      HDFS staging directory for the generated HFiles
   * @param tableNameStr target table name
   */
  def saveAsHFile(rdd:RDD[(ImmutableBytesWritable, ListBuffer[KeyValue])], tmpPath:String, tableNameStr:String): Unit = {
    val conf = getHBaseConf
    // 要保持整体有序 — keys must be globally sorted for HFileOutputFormat2.
    rdd.flatMapValues(_.iterator)
      .sortBy(_._1, ascending = true)
      .saveAsNewAPIHadoopFile(tmpPath,
        classOf[ImmutableBytesWritable],
        classOf[KeyValue],
        classOf[HFileOutputFormat2],
        conf)
    val tableName = TableName.valueOf(tableNameStr)
    val table = conn.getTable(tableName)
    val regionLocator = conn.getRegionLocator(tableName)
    try {
      val job = Job.getInstance(conf)
      job.setJobName(s"HBaseDump2$tableNameStr")
      job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
      job.setMapOutputValueClass(classOf[KeyValue])
      // NOTE(review): configureIncrementalLoad is invoked AFTER the HFiles were
      // already written above, so the region-aware partitioner it installs never
      // influences the write; the load succeeds only while the sorted output
      // happens to fit the table's regions. Kept as-is — confirm intent before
      // reordering.
      HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator)
      val loader = new LoadIncrementalHFiles(conf)
      loader.doBulkLoad(new Path(tmpPath), conn.getAdmin, table, regionLocator)
    } finally {
      // Close per-call handles; the shared Connection stays open.
      regionLocator.close()
      table.close()
    }
  }
}
