package com.shujia.onhbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{HConnectionManager, Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.rdd.RDD

object SparkReadHbase {

  /**
   * Scans the HBase table "student" through Spark's `newAPIHadoopRDD` and
   * prints one tab-separated line per row: rowkey, info:name, info:age.
   *
   * Runs in local mode against the ZooKeeper quorum node1/node2/node3.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SparkReadHbase").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      val config: Configuration = new Configuration
      config.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181")
      // Use the library constant rather than the raw "hbase.mapreduce.inputtable"
      // string, so a typo cannot silently point the scan at nothing.
      config.set(TableInputFormat.INPUT_TABLE, "student") // table to scan

      val rdd: RDD[(ImmutableBytesWritable, Result)] = sc.newAPIHadoopRDD(
        config,
        classOf[TableInputFormat],       // input format: full HBase table scan
        classOf[ImmutableBytesWritable], // key type: the row key bytes
        classOf[Result]                  // value type: all cells of the row
      )

      rdd.map { case (keyWritable, result) =>
        // NOTE(review): assumes the writable's backing array holds exactly the
        // row key (offset 0) — true for rows produced by TableInputFormat.
        val rowkey = Bytes.toString(keyWritable.get())

        // getValue returns null when the cell is absent; wrap in Option so a
        // missing column yields "" instead of the literal string "null".
        def cell(qualifier: String): String =
          Option(result.getValue(Bytes.toBytes("info"), Bytes.toBytes(qualifier)))
            .map(Bytes.toString)
            .getOrElse("")

        s"$rowkey\t${cell("name")}\t${cell("age")}"
      }.foreach(println) // prints on executors; fine for setMaster("local")
    } finally {
      // Always release Spark resources, even if the scan fails.
      sc.stop()
    }
  }
}
