package com.ada.spark.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.hadoop.hbase.util.Bytes

/**
  * Spark can read HBase through the Hadoop input-format API via
  * org.apache.hadoop.hbase.mapreduce.TableInputFormat. This input format yields key/value
  * pairs where the key is org.apache.hadoop.hbase.io.ImmutableBytesWritable and the value
  * is org.apache.hadoop.hbase.client.Result.
  */
object HBaseSpark {

    /**
      * Entry point: reads the HBase table "fruit_spark" as an RDD of
      * (ImmutableBytesWritable, Result) pairs, prints the row count, then prints
      * each row's key, name and price from the "info" column family.
      *
      * @param args unused command-line arguments
      */
    def main(args: Array[String]): Unit = {

        // Spark configuration: local[*] master, suitable for standalone/demo runs.
        val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("HBaseSpark")

        // Create the SparkContext (entry point for RDD operations).
        val sc = new SparkContext(sparkConf)

        try {
            // Build the HBase configuration; connection settings (e.g. the ZooKeeper
            // quorum) are assumed to come from hbase-site.xml on the classpath —
            // uncomment the line below if they must be set explicitly.
            val conf: Configuration = HBaseConfiguration.create()
            //conf.set("hbase.zookeeper.quorum", "hadoop121,hadoop122,hadoop123")
            conf.set(TableInputFormat.INPUT_TABLE, "fruit_spark")

            // Read the HBase table as an RDD via the new Hadoop API input format.
            val hbaseRDD: RDD[(ImmutableBytesWritable, Result)] = sc.newAPIHadoopRDD(
                conf,
                classOf[TableInputFormat],
                classOf[ImmutableBytesWritable],
                classOf[Result])

            val count: Long = hbaseRDD.count()
            println(count)

            // Print each row. Result.getValue returns null when a cell is absent, and
            // Bytes.toInt(null) throws an NPE, so missing cells are handled via Option.
            // NOTE(review): foreach runs on the executors — println output only appears
            // on the driver console because the master is local[*].
            hbaseRDD.foreach {
                case (_, result) =>
                    // Row keys are assumed to be 4-byte big-endian ints — TODO confirm
                    // against the writer of this table.
                    val key: Int = Bytes.toInt(result.getRow)
                    val name: String = Option(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("name")))
                        .map(Bytes.toString)
                        .getOrElse("<missing>")
                    val price: String = Option(result.getValue(Bytes.toBytes("info"), Bytes.toBytes("price")))
                        .map(bytes => Bytes.toInt(bytes).toString)
                        .getOrElse("<missing>")
                    println(s"RowKey:$key,Name:$name,Price:$price")
            }
        } finally {
            // Always release Spark resources, even if the HBase read fails.
            sc.stop()
        }
    }

}
