package com.shujia.onhbase

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Result, Scan}
import org.apache.hadoop.hbase.filter.{BinaryPrefixComparator, CompareFilter, RowFilter}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.{SparkConf, SparkContext}

object SparkReadHbaseScan {

  /**
   * Spark job that reads the HBase table "student" through
   * `newAPIHadoopRDD`, restricted by a row-key prefix filter, and prints
   * each row as "rowkey<TAB>name".
   *
   * Requires a reachable ZooKeeper quorum (node1/node2/node3:2181); runs
   * Spark in local mode. `foreach(println)` prints on the executor, which
   * in local mode is the driver console.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("app").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // Scan limited to the "info" column family, keeping only rows whose
      // row key starts with the binary prefix "15001009".
      // (Bytes.toBytes is UTF-8; String.getBytes would depend on the
      // platform default charset.)
      val scan = new Scan()
      val prefixComparator = new BinaryPrefixComparator(Bytes.toBytes("15001009"))
      // Row-key filter (not a column filter): EQUAL against the prefix comparator.
      val rowKeyFilter = new RowFilter(CompareFilter.CompareOp.EQUAL, prefixComparator)
      scan.setFilter(rowKeyFilter)
      scan.addFamily(Bytes.toBytes("info"))

      // TableInputFormat expects the Scan as a Base64-encoded protobuf string.
      val scanToString = Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray)

      val config = HBaseConfiguration.create
      config.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181")
      // Use the public constants instead of magic key strings.
      config.set(TableInputFormat.INPUT_TABLE, "student") // table to read
      config.set(TableInputFormat.SCAN, scanToString) // serialized scan

      val tableRDD = sc.newAPIHadoopRDD(
        config,
        classOf[TableInputFormat],
        classOf[ImmutableBytesWritable],
        classOf[Result])

      // Convert each (rowkey, Result) pair to a printable line. The bytes
      // are copied into Strings immediately, so Writable reuse by the
      // record reader is not an issue.
      tableRDD.map { case (key, result) =>
        val rowkey = Bytes.toString(key.get())
        val name = Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("name")))
        s"$rowkey\t$name"
      }.foreach(println)
    } finally {
      // Always release Spark resources, even if the job throws.
      sc.stop()
    }
  }
}
