import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Reads rows from an HBase table into a Spark RDD and prints each row's
  * key and one configured column value.
  *
  * All connection settings (ZooKeeper quorum/port, HBase root dir, master,
  * table name, column family/qualifier) are resolved through [[ConnectMessage]].
  *
  * @author dinghao
  * @since 2021-08-19
  */
object HBaseSource {

  def main(args: Array[String]): Unit = {
    write()
    read()
  }

  /** Placeholder for the HBase write path — not implemented yet. */
  def write(): Unit = {
    // TODO: implement (e.g. TableOutputFormat + saveAsNewAPIHadoopDataset)
  }

  /**
    * Scans the configured HBase table via `TableInputFormat` and prints
    * every row key together with the configured family/qualifier value.
    *
    * The SparkContext is created locally and is always stopped in a
    * `finally` block, even if the scan fails.
    */
  def read(): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("HBaseSource").setMaster("local[*]"))
    try {
      val conf = HBaseConfiguration.create()
      // HBase / ZooKeeper connection settings.
      conf.set("hbase.zookeeper.property.clientPort", ConnectMessage.get("hbase.zookeeper.property.clientPort"))
      conf.set("hbase.zookeeper.quorum", ConnectMessage.get("hbase.zookeeper.quorum"))
      conf.set("hbase.rootdir", ConnectMessage.get("hbase.rootdir"))
      conf.set("hbase.master", ConnectMessage.get("hbase.master"))
      // Name of the table to scan.
      conf.set(TableInputFormat.INPUT_TABLE, ConnectMessage.get("hbase.table"))

      val hbaseRdd = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
        classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
        classOf[Result])

      // Resolve family/qualifier once on the driver instead of per row in the
      // executor closure. Bytes.toBytes always encodes UTF-8 (symmetric with
      // Bytes.toString below), unlike String.getBytes which depends on the
      // JVM default charset.
      val family = Bytes.toBytes(ConnectMessage.get("hbase.family"))
      val column = Bytes.toBytes(ConnectMessage.get("hbase.column"))

      // The RDD is consumed exactly once, so no cache() is needed.
      hbaseRdd.foreach { case (_, result) =>
        val key = Bytes.toString(result.getRow)
        val message = Bytes.toString(result.getValue(family, column))
        printf("ID: %s, message: %s \n", key, message)
      }
    } finally {
      // Always release the Spark context and its resources.
      sc.stop()
    }
  }
}
