package com.xx.sparkhbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{Put, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}

/**
 * Reads rows from an HBase table into an RDD via HBaseContext.hbaseRDD.
 * Intended for a secure (Kerberos-enabled) cluster; HBaseConfiguration.create()
 * picks up hbase-site.xml from the classpath.
 * @author tzp
 * @since 2022/6/20
 */
object TableReadWithFormat {

  /**
   * Entry point: scans an HBase table starting at row key "0" and prints
   * the number of rows read.
   *
   * @param args optional; args(0) overrides the default table name
   *             ("dataeco-test"), keeping the original behavior when
   *             no arguments are supplied.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .getOrCreate()
    val sc = spark.sparkContext

    // Generalized: table name may come from the CLI; default preserves
    // the original hard-coded value.
    val tableName = if (args.nonEmpty) args(0) else "dataeco-test"

    // Picks up hbase-site.xml (ZooKeeper quorum, security settings) from
    // the classpath.
    val conf: Configuration = HBaseConfiguration.create()
    val hbaseContext = new HBaseContext(sc, conf)

    // BUG FIX: `new Scan("0", Filter)` did not compile — Scan has no
    // (String, Filter) constructor taking a companion-object reference.
    // Build the scan with the fluent API and a byte-encoded start row.
    val scan = new Scan().withStartRow(Bytes.toBytes("0"))

    val rdd = hbaseContext.hbaseRDD(TableName.valueOf(tableName), scan)

    // BUG FIX: the RDD was previously discarded, so the job did no work.
    // Materialize the scan so there is an observable result.
    println(s"Rows scanned from $tableName: ${rdd.count()}")

    // Release Spark resources cleanly instead of relying on JVM exit.
    spark.stop()
  }
}
