package cn.jly.bigdata.spark.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.{Cell, CellUtil, HBaseConfiguration}
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author lanyangji
 * @date 2019/11/28 20:33
 */
object Spark13_HBase {

  /**
   * Demonstrates reading an HBase table into an RDD (new Hadoop API) and
   * writing rows back (old mapred API, as required by `saveAsHadoopDataset`).
   *
   * Requires a reachable HBase cluster (ZooKeeper quorum below) with a
   * table named "rddtable" containing an "info" column family.
   */
  def main(args: Array[String]): Unit = {

    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("test-hbase"))

    try {
      // HBase configuration: ZooKeeper quorum plus the table to scan.
      val hBaseConf: Configuration = HBaseConfiguration.create()
      hBaseConf.set("hbase.zookeeper.quorum", "hadoop102,hadoop103,hadoop104")
      hBaseConf.set(TableInputFormat.INPUT_TABLE, "rddtable")

      // ImmutableBytesWritable is the row-key type; Result holds one row's cells.
      val hbaseRdd: RDD[(ImmutableBytesWritable, Result)] =
        sc.newAPIHadoopRDD(hBaseConf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])

      println(hbaseRdd.count())

      // NOTE: foreach runs on executors; its output is only visible here
      // because the master is local[*].
      hbaseRdd.foreach { case (rowKey, result) =>
        // Decode the row key from its backing byte array, honouring
        // offset/length. Printing the ImmutableBytesWritable directly would
        // emit a hex dump of the bytes, not the readable key.
        val row: String = Bytes.toString(rowKey.get(), rowKey.getOffset, rowKey.getLength)
        for (cell <- result.rawCells()) {
          val value: String = Bytes.toString(CellUtil.cloneValue(cell))
          print(s"$row\t$value\t")
        }
        println()
      }

      println("--------------insert data into Hbase----------------")

      // Rows to insert: (rowKey, name, age).
      val dataRDD: RDD[(String, String, String)] = sc.makeRDD(List(
        ("1003", "jilanyang", "22"),
        ("1004", "wuting", "26"),
        ("1005", "wanghaitao", "22")
      ))

      // Build the (rowKey, Put) pairs TableOutputFormat expects.
      val putRdd: RDD[(ImmutableBytesWritable, Put)] = dataRDD.map { case (rowKey, name, age) =>
        val put = new Put(Bytes.toBytes(rowKey))
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(name))
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(age))
        (new ImmutableBytesWritable(Bytes.toBytes(rowKey)), put)
      }

      // saveAsHadoopDataset requires the old (mapred) output format + JobConf.
      val jobConf = new JobConf(hBaseConf)
      jobConf.setOutputFormat(classOf[TableOutputFormat])
      jobConf.set(TableOutputFormat.OUTPUT_TABLE, "rddtable")
      putRdd.saveAsHadoopDataset(jobConf)
    } finally {
      // Always release the SparkContext, even if a job above fails.
      sc.stop()
    }
  }
}
