package com.sys.tdhclient.startapp

import java.util.Properties

import com.sys.tdhclient.startapp.SparkWtHbasePut.properties
import com.sys.tdhclient.utils.{HBaseUtils, SparkSc}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object SparkWtHbaseBulkload {
  // Bulk-loads sample rows into an HBase table by writing HFiles to HDFS and
  // then handing them to LoadIncrementalHFiles. NOTE(review): everything below
  // is object-initialization code — it all runs (with side effects) on first
  // access to SparkWtHbaseBulkload.
  private val sparkContext: SparkContext = SparkSc.getSparkContext()
  // Kept even though unreferenced below: getSparkSession() initializes the
  // session as a side effect — TODO confirm whether that is relied upon.
  private val sparkSession: SparkSession = SparkSc.getSparkSession()
  private val properties: Properties = SparkSc.getProperties()
  private val tableName: String = properties.getProperty("tableName")
  private val quorum: String = properties.getProperty("quorum")
  private val port: String = properties.getProperty("port")
  // HDFS staging directory for the generated HFiles. Overridable via the
  // "hfilePath" property; the default preserves the original hard-coded path.
  private val hfilePath: String =
    properties.getProperty("hfilePath", "hdfs://localhost:8020/tmp/hbase")

  // HBase configuration: point the output format at the target table.
  val conf = HBaseUtils.getHBaseConfiguration()
  conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)

  val table = HBaseUtils.getTable(conf, tableName)

  val job = Job.getInstance(conf)
  job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
  // BUG FIX: the original called setMapOutputKeyClass twice, overwriting the
  // key class with KeyValue and never setting the value class. KeyValue is the
  // map output VALUE class that HFileOutputFormat2 expects.
  job.setMapOutputValueClass(classOf[KeyValue])

  HFileOutputFormat2.configureIncrementalLoadMap(job, table)

  // Sample input data: "rowKey,count" strings.
  val indataRDD = sparkContext.makeRDD(Array("20180723_02,13", "20180723_03,13", "20180818_03,13"))
  // Build (rowKey, KeyValue) pairs for the HFile writer. Use Bytes.toBytes
  // (UTF-8) consistently instead of String.getBytes, which depends on the
  // platform default charset and could disagree with the row-key encoding.
  // NOTE(review): HFileOutputFormat2 requires output sorted by row key — the
  // sample data above is already in order; verify for real inputs.
  val rdd = indataRDD.map { line =>
    val fields = line.split(",")
    val rowKey = Bytes.toBytes(fields(0))
    val kv = new KeyValue(rowKey, Bytes.toBytes("info"), Bytes.toBytes("clict_count"), Bytes.toBytes(fields(1)))
    (new ImmutableBytesWritable(rowKey), kv)
  }

  // Write the HFiles to the HDFS staging directory...
  rdd.saveAsNewAPIHadoopFile(hfilePath, classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], conf)

  // ...then bulk-load them from the same directory into the HBase table.
  val bulkLoader = new LoadIncrementalHFiles(conf)
  bulkLoader.doBulkLoad(new Path(hfilePath), table)
}
