package com.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{SparkConf, SparkContext}

object HbaseWrite {

  /** Demo: write a small in-memory dataset into HBase table `ns1:t1`
    * via the new Hadoop API (`TableOutputFormat` + `saveAsNewAPIHadoopDataset`).
    *
    * Rows written: rowkey -> cf1:name, cf1:age.
    */
  def main(args: Array[String]): Unit = {
    // Fixed: appName said "HbaseRead" although this job writes to HBase.
    // NOTE(review): local[2] master is hard-coded for demo use; drop setMaster
    // when submitting to a real cluster.
    val conf: SparkConf = new SparkConf().setAppName("HbaseWrite").setMaster("local[2]")
    val sc: SparkContext = new SparkContext(conf)

    try {
      // HBase connection settings: ZooKeeper quorum and the target table.
      val hbaseConf: Configuration = HBaseConfiguration.create()
      hbaseConf.set("hbase.zookeeper.quorum", "zjj101")
      //    hbaseConf.set("hbase.zookeeper.quorum", "zjj101,zjj102,zjj103")
      hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, "ns1:t1")

      // A Hadoop Job is created only to carry the output-format configuration;
      // it is never submitted as a MapReduce job.
      val job = Job.getInstance(hbaseConf)
      job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
      job.setOutputKeyClass(classOf[ImmutableBytesWritable])
      job.setOutputValueClass(classOf[Put])

      // Sample data to write: (rowkey, name, age).
      val initialRDD = sc.parallelize(
        List(
          ("1010", "zjj1", "25"),
          ("1011", "zjj2", "26"),
          ("1012", "zjj3", "28")))

      // Convert each tuple into the (ImmutableBytesWritable, Put) pair that
      // TableOutputFormat expects before it can be written to HBase.
      val hbaseRDD = initialRDD.map {
        case (rk, name, age) =>
          val rowkey = new ImmutableBytesWritable()
          rowkey.set(Bytes.toBytes(rk)) // rowkey
          val put = new Put(Bytes.toBytes(rk))
          // "cf1" is the column family.
          put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("name"), Bytes.toBytes(name))
          put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("age"), Bytes.toBytes(age))
          (rowkey, put)
      }

      // Persist the pairs into HBase through the new Hadoop output API.
      hbaseRDD.saveAsNewAPIHadoopDataset(job.getConfiguration)
    } finally {
      // Fixed: release the SparkContext even when the HBase write fails;
      // previously an exception would leak the context.
      sc.stop()
    }
  }
}
