package hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SparkSession

/**
  * Demonstrates writing to and reading from an HBase table ("stu") with Spark.
  *
  * Connection settings are hard-coded for a specific cluster ("dw-Huake05");
  * adjust them before running anywhere else.
  */
object SparkHbase {
  def main(args: Array[String]): Unit = {

    // Spark environment (local mode — demo only).
    val spark = SparkSession
      .builder()
      .appName("TestSparkHbase")
      .master("local")
      .getOrCreate()

    // HBase client configuration.
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "dw-Huake05")
    // NOTE(review): 60010 is normally the HMaster *web UI* port; the RPC port
    // is typically 60000 (16000 on HBase 1.x+). Verify against the cluster.
    hbaseConf.set("hbase.master", "dw-Huake05:60010")

    // Write the sample rows first, then scan the table back and print it.
    writeHBase(hbaseConf, spark)
    getHBase(hbaseConf, spark)
  }

  /**
    * Scans the HBase table "stu" as an RDD and prints each row's key and its
    * cf1 columns (addres, age, sex, username).
    *
    * @param hbaseConf HBase client configuration (ZooKeeper quorum etc.)
    * @param spark     active SparkSession whose SparkContext builds the RDD
    */
  def getHBase(hbaseConf: Configuration, spark: SparkSession): Unit = {

    // Table to scan.
    hbaseConf.set(TableInputFormat.INPUT_TABLE, "stu")

    // Expose the table as an RDD[(ImmutableBytesWritable, Result)].
    val hbaseRDD = spark.sparkContext.newAPIHadoopRDD(hbaseConf,
      classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])

    // Print every row. Qualifiers are encoded with Bytes.toBytes (UTF-8)
    // rather than String.getBytes(), which uses the platform default charset,
    // so lookups match the bytes written by writeHBase.
    hbaseRDD.foreach { case (_, result) =>
      val cf1 = Bytes.toBytes("cf1")
      val key = Bytes.toString(result.getRow)
      val addres = Bytes.toString(result.getValue(cf1, Bytes.toBytes("addres")))
      val age = Bytes.toString(result.getValue(cf1, Bytes.toBytes("age")))
      val sex = Bytes.toString(result.getValue(cf1, Bytes.toBytes("sex")))
      val username = Bytes.toString(result.getValue(cf1, Bytes.toBytes("username")))

      // The original code had a stray trailing `hashCode()` appended to this
      // println call (a leftover/typo with no effect); it has been removed.
      println(s"row key:$key addres=$addres sex=$sex age=$age username=$username")

      /**
        * Sample output:
        * row key:001 addres=guangzhou age=20 username=alex
        * row key:002 addres=shenzhen age=34 username=jack
        * row key:003 addres=beijing age=23 username=lili
        */
    }
  }

  /**
    * Writes three hard-coded sample rows into the HBase table "stu" via the
    * old-API TableOutputFormat (org.apache.hadoop.hbase.mapred).
    *
    * @param hbaseConf HBase client configuration (ZooKeeper quorum etc.)
    * @param spark     active SparkSession whose SparkContext builds the RDD
    */
  def writeHBase(hbaseConf: Configuration, spark: SparkSession): Unit = {

    // Set up the job. TableOutputFormat here is the one from the
    // org.apache.hadoop.hbase.mapred (old MapReduce API) package.
    val jobConf = new JobConf(hbaseConf)
    jobConf.setOutputFormat(classOf[TableOutputFormat])

    // Target table.
    jobConf.set(TableOutputFormat.OUTPUT_TABLE, "stu")

    // Sample data: id, addres, age, sex, username (comma-separated).
    val array = Array("004,shanghai,25,man,jone",
      "005,nanjing,31,women,cherry",
      "006,wuhan,18,man,pony")

    val rdd = spark.sparkContext.makeRDD(array)

    // Turn each CSV line into a (row key, Put) pair.
    val saveRDD = rdd.map(line => line.split(",")).map(x => {

      /**
        * One Put object is one row; the row key is passed to the constructor.
        * All values must be converted with
        * org.apache.hadoop.hbase.util.Bytes.toBytes.
        * Put.addColumn takes three arguments: column family, qualifier, value.
        */
      // Row key is "id_age_sex", e.g. "004_25_man".
      val put = new Put(Bytes.toBytes(x(0) + "_" + x(2) + "_" + x(3)))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("addres"), Bytes.toBytes(x(1)))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("age"), Bytes.toBytes(x(2)))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("sex"), Bytes.toBytes(x(3)))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("username"), Bytes.toBytes(x(4)))
      (new ImmutableBytesWritable, put)
    })

    // Persist the RDD to HBase.
    saveRDD.saveAsHadoopDataset(jobConf)
  }
}
