package com.sys.tdhclient.startapp

import java.util.Properties

import com.sys.tdhclient.startapp.SparkWtHbasePut.properties
import com.sys.tdhclient.utils.{HBaseUtils, SparkSc}
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

/**
 * Demo entry point that writes a few hard-coded rows into an HBase table via
 * Spark's old-mapred-API `saveAsHadoopDataset`.
 *
 * Rows are "rowKey,value" strings; each becomes a Put of column
 * `info:clict_count` (NOTE(review): likely a typo for "click_count", but it is
 * a stored column qualifier — confirm before renaming).
 */
object SparkWtHbaseApi {
  private val sparkContext: SparkContext = SparkSc.getSparkContext()
  private val properties: Properties = SparkSc.getProperties()
  private val tableName: String = properties.getProperty("tableName")

  def main(args: Array[String]): Unit = {
    // HBase connection settings (quorum/port) come from HBaseUtils.
    val conf = HBaseUtils.getHBaseConfiguration()
    conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    // The JobConf must inherit the HBase configuration: a bare `new JobConf()`
    // would target the default quorum/port, not the configured cluster.
    val jobConf = new JobConf(conf)
    // saveAsHadoopDataset uses the OLD mapred API, so the output format must be
    // the mapred-package TableOutputFormat (the imported mapreduce one does not
    // implement org.apache.hadoop.mapred.OutputFormat and will not compile here).
    jobConf.setOutputFormat(classOf[org.apache.hadoop.hbase.mapred.TableOutputFormat])
    // Likewise the mapred job reads the mapred constant ("hbase.mapred.outputtable"),
    // not the mapreduce one set on `conf` above.
    jobConf.set(org.apache.hadoop.hbase.mapred.TableOutputFormat.OUTPUT_TABLE, tableName)

    // Demo input: "rowKey,value" pairs.
    val indataRDD =
      sparkContext.makeRDD(Array("20180723_02,10", "20180723_03,10", "20180818_03,50"))

    indataRDD
      .map(_.split(","))
      .map { arr =>
        val put = new Put(Bytes.toBytes(arr(0)))
        // addColumn replaces Put.add(family, qualifier, value), which is
        // deprecated and removed in HBase 2.x.
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("clict_count"), Bytes.toBytes(arr(1)))
        (new ImmutableBytesWritable, put)
      }
      .saveAsHadoopDataset(jobConf)

    sparkContext.stop()
  }
}
