package com.atguigu.sparkcore.rdd.file.bbase

import com.atguigu.sparkcore.util.MySparkContextUtil
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job

/**
 * description : Writes RDD data into HBase using the MapReduce `TableOutputFormat`.
 *               Each input tuple (rowKey, age, name) becomes one Put into the
 *               "info" column family of the "user" table.
 * author : 剧情再美终是戏
 * mail : 13286520398@163.com
 * date : Created in 2020/1/9 13:51
 * modified By :
 * version: : 1.0
 */
object WHbase {

  def main(args: Array[String]): Unit = {
    // Create the SparkContext (project utility handles master/app-name wiring).
    val sc = MySparkContextUtil.get(args)

    try {
      // HBase connection configuration: ZooKeeper quorum plus target table.
      val conf = new Configuration()
      conf.set("hbase.zookeeper.quorum", "hadoop101,hadoop102,hadoop103")
      // Use the TableOutputFormat constant rather than a hand-typed property key
      // ("hbase.mapred.outputtable") — a typo here would fail only at runtime.
      conf.set(TableOutputFormat.OUTPUT_TABLE, "user")

      // Build the Hadoop job and declare the output format, key and value classes
      // expected by TableOutputFormat: (ImmutableBytesWritable, Put).
      val job = Job.getInstance(conf)
      job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
      job.setOutputKeyClass(classOf[ImmutableBytesWritable])
      job.setOutputValueClass(classOf[Put])

      // Sample data: (rowKey, age, name), distributed over 2 partitions.
      val rdd = sc.parallelize(Array(("1003", "18", "1003s"), ("1004", "28", "1004s"), ("1005", "44", "1005s"), ("1006", "23", "1006s")), 2)

      // Wrap each record as (ImmutableBytesWritable, Put) for the output format.
      // The writable key is ignored by TableOutputFormat; the Put carries the row key.
      val inputValue = rdd.map { case (key, age, name) =>
        val put = new Put(Bytes.toBytes(key))
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(age))
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(name))
        (new ImmutableBytesWritable(), put)
      }

      // Write the records into HBase via the new Hadoop API.
      inputValue.saveAsNewAPIHadoopDataset(job.getConfiguration)
    } finally {
      // Always release the SparkContext, even if configuration or the write fails.
      MySparkContextUtil.close(sc)
    }
  }

}
