package com.datamining.hbase

import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants, TableName}
import org.apache.hadoop.mapred
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
  * HbaseWriteTest — writes sample rows to the HBase "student" table via Spark.
  *
  * datamining / com.datamining.hbase
  *
  * @author Administrator (kevin)
  * @since 2017-05-23
  */
object HbaseWriteTest {

  /** Target HBase table for the write (named to avoid shadowing the imported `TableName` class). */
  private val OutputTable = "student"

  /**
    * Writes two sample rows to the HBase table "student" using Spark's
    * `saveAsHadoopDataset` with the old-API (mapred) `TableOutputFormat`.
    *
    * Requires a reachable ZooKeeper quorum (hadoop01..03:2181) fronting the
    * HBase cluster, and an existing table "student" with column family "info".
    */
  def main(args: Array[String]): Unit = {
    // Spark configuration; local[4] keeps this runnable without a cluster manager.
    val sparkConf = new SparkConf()
      .setMaster("local[4]")
      .setAppName("HbaseWriteTest")

    // HBase client configuration: ZooKeeper quorum and the output table.
    val hbaseConfig = HBaseConfiguration.create()
    hbaseConfig.set(HConstants.ZOOKEEPER_QUORUM, "hadoop01,hadoop02,hadoop03")
    hbaseConfig.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181")
    hbaseConfig.set(TableOutputFormat.OUTPUT_TABLE, OutputTable)

    val sparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Old-API (mapred) job configuration, as required by saveAsHadoopDataset.
    // The output value class must be Put — the type this job actually emits.
    // (Result is only produced when *reading* from HBase, never written.)
    val jobConf = new JobConf(hbaseConfig, this.getClass)
    jobConf.setOutputKeyClass(classOf[ImmutableBytesWritable])
    jobConf.setOutputValueClass(classOf[Put])
    jobConf.setOutputFormat(classOf[TableOutputFormat])
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, OutputTable)

    // Sample rows: (rowKey, name, age).
    val needAddRdd = sparkSession.sparkContext.makeRDD(
      Seq(("3", "testAdd3", "100"), ("4", "testAdd4", "100")))

    val addRdd = needAddRdd.map { case (rowKey, name, age) =>
      val put = new Put(Bytes.toBytes(rowKey))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(name))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(age))
      // TableOutputFormat ignores the key; the row key travels inside the Put.
      (new ImmutableBytesWritable(), put)
    }

    addRdd.saveAsHadoopDataset(jobConf)

    sparkSession.close()
  }
}
