package com.shujia.onhbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.Try

/**
 * Spark job that scans the HBase table `shujia:student` through
 * [[TableInputFormat]] and prints each row as a
 * (rowkey, name, age, gender, clazz) tuple.
 *
 * Fixes over the previous version:
 *  - the SparkContext is always stopped (try/finally) so the app doesn't leak it;
 *  - missing cells no longer NPE: `Result.getValue` returns null for an absent
 *    qualifier, so every column read is wrapped in Option, and `age` is parsed
 *    defensively (defaults to 0 on missing/non-numeric values);
 *  - the HBase config is built with `HBaseConfiguration.create()` so
 *    hbase-default.xml / hbase-site.xml resources are loaded;
 *  - the input-table key uses the `TableInputFormat.INPUT_TABLE` constant
 *    instead of a duplicated raw string.
 */
object Demo1ReadHbabe {
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("onhabse")
    val sc: SparkContext = new SparkContext(conf)

    try {
      // HBaseConfiguration.create() loads the HBase default/site resources on
      // top of the Hadoop configuration — the recommended way to build a client config.
      val configuration: Configuration = HBaseConfiguration.create()

      // ZooKeeper quorum used to locate the HBase cluster.
      configuration.set("hbase.zookeeper.quorum", "master:2181,node1:2181,node2:2181")

      // Table to scan (namespace:table), keyed by the TableInputFormat constant.
      configuration.set(TableInputFormat.INPUT_TABLE, "shujia:student")

      // Scan the table as an RDD of (rowkey, row cells) pairs.
      val tableRDD: RDD[(ImmutableBytesWritable, Result)] = sc.newAPIHadoopRDD(
        configuration,                   // Hadoop configuration carrying the scan settings
        classOf[TableInputFormat],       // input format that reads HBase regions
        classOf[ImmutableBytesWritable], // key type: the rowkey bytes
        classOf[Result]                  // value type: all cells of the row
      )

      val studentRDD: RDD[(String, String, Int, String, String)] =
        tableRDD.map { case (rowkey, result) =>
          val id: String = Bytes.toString(rowkey.get())

          // getValue returns null when the cell is absent; Option() turns that
          // into None so downstream code never sees a null String.
          def column(qualifier: String): Option[String] =
            Option(result.getValue(Bytes.toBytes("info"), Bytes.toBytes(qualifier)))
              .map(Bytes.toString)

          val name: String = column("name").getOrElse("")
          // Rows with a missing or non-numeric age default to 0 instead of
          // throwing and failing the whole task.
          val age: Int = column("age").flatMap(s => Try(s.toInt).toOption).getOrElse(0)
          val gender: String = column("gender").getOrElse("")
          val clazz: String = column("clazz").getOrElse("")

          (id, name, age, gender, clazz)
        }

      studentRDD.foreach(println)
    } finally {
      // Release the SparkContext even if the job throws.
      sc.stop()
    }
  }
}
