package org.example.utils

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object hbaseConn {

  // NOTE(review): eagerly-initialized session with a hard-coded master("local") —
  // fine for local testing, but for cluster deployment the master should come from
  // spark-submit / configuration rather than being baked in here.
  val session: SparkSession = SparkSession.builder().master("local").appName("readHbase").getOrCreate()

  /**
   * Creates an RDD of (row key, Result) pairs by scanning an HBase table
   * through the MapReduce `TableInputFormat`.
   *
   * The connection settings default to the values that were previously
   * hard-coded, so existing callers (`createRdd(table)`) behave identically.
   *
   * @param table     name of the HBase table to scan
   * @param quorum    ZooKeeper quorum host(s)
   * @param port      ZooKeeper client port
   * @param rootDir   HBase root directory on HDFS
   * @param batchSize maximum number of columns returned per `Scan.next()` call
   *                  (a column batch size, not a row-count limit)
   * @return RDD of (ImmutableBytesWritable, Result), one pair per HBase row
   */
  def createRdd(
      table: String,
      quorum: String = "192.168.226.134",
      port: String = "2181",
      rootDir: String = "hdfs://192.168.226.134:9000/hbase",
      batchSize: String = "100"): RDD[(ImmutableBytesWritable, Result)] = {
    val hbaseConf: Configuration = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", port)
    hbaseConf.set("hbase.rootdir", rootDir)
    hbaseConf.set("hbase.zookeeper.quorum", quorum)
    // SCAN_BATCHSIZE caps columns fetched per Scan.next(); it does NOT limit rows read.
    hbaseConf.set(TableInputFormat.SCAN_BATCHSIZE, batchSize)
    // Name of the HBase table to read.
    hbaseConf.set(TableInputFormat.INPUT_TABLE, table)

    session.sparkContext.newAPIHadoopRDD(
      hbaseConf,
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result]
    )
  }

}
