package com.hbase

import java.util

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{Cell, CellUtil, HBaseConfiguration}
import org.apache.spark.{SparkConf, SparkContext}
import org.json4s.jackson.Serialization

import scala.collection.mutable


/**
 * Minimal Spark driver that reads every row of an HBase table and prints
 * each row as a JSON object of {"rowKey": ..., qualifier: value, ...}.
 *
 * Runs locally ("local[2]") against the ZooKeeper quorum configured below.
 */
object HbaseRead {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("HbaseRead").setMaster("local[2]")
    val sc: SparkContext = new SparkContext(conf)

    // HBase connection configuration.
    val hbaseConf: Configuration = HBaseConfiguration.create()

    // ZooKeeper quorum (comma-separate hosts for a multi-node quorum).
    //   hbaseConf.set("hbase.zookeeper.quorum", "zjj101,zjj102,zjj103")
    hbaseConf.set("hbase.zookeeper.quorum", "zjj101")

    // Read table t1 in namespace ns1.
    hbaseConf.set(TableInputFormat.INPUT_TABLE, "ns1:t1")

    // Build an RDD over the HBase table; each record is (row key, row data).
    val rdd1 = sc.newAPIHadoopRDD(
      hbaseConf,
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable], // wraps the row-key bytes
      classOf[Result]                  // wraps one row's cells
    )

    // Turn each (rowKey, Result) pair into a JSON string.
    val resultRDD = rdd1.map {
      case (iw, result) =>
        val map = mutable.Map[String, Any]()
        // Store the row key under "rowKey" (fixes the original "rowKew" typo).
        map += "rowKey" -> Bytes.toString(iw.get())

        // Explicit converters (.asScala) instead of the deprecated implicit
        // scala.collection.JavaConversions.
        import scala.collection.JavaConverters._
        // Result.listCells() returns null when the row has no cells — guard
        // with Option to avoid an NPE on empty rows.
        val cells: Seq[Cell] =
          Option(result.listCells()).map(_.asScala.toSeq).getOrElse(Seq.empty)
        for (cell <- cells) {
          // qualifier -> value, both decoded as UTF-8 strings.
          val key = Bytes.toString(CellUtil.cloneQualifier(cell))
          val value = Bytes.toString(CellUtil.cloneValue(cell))
          map += key -> value
        }

        // Serialize the row map to JSON via json4s for readable output.
        implicit val formats: org.json4s.DefaultFormats.type = org.json4s.DefaultFormats
        Serialization.write(map)
    }
    resultRDD.collect.foreach(println)
    sc.stop()
  }
}
