package cn.doitedu.dw_export

import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
 * Demo skeleton for bulk-loading pre-built HFiles into HBase from Spark.
 *
 * NOTE(review): this is scaffolding only — both values below are `null`
 * placeholders and the actual write/load steps are commented out. Nothing
 * executes at runtime. Fill in a real SparkSession and a properly sorted
 * RDD[(ImmutableBytesWritable, KeyValue)] before enabling the commented code.
 */
object BulkLoaderDemo {
  def main(args: Array[String]): Unit = {

    // Placeholder: a real implementation would build this via SparkSession.builder()
    val session: SparkSession = null

    // Placeholder: row-key/KeyValue pairs destined for HFile output.
    // HFileOutputFormat2 requires this RDD to be sorted by row key.
    val hfileRdd: RDD[(ImmutableBytesWritable, KeyValue)] = null


    // Step 1: persist our data as HFiles on HDFS.
    //HFileOutputFormat2.configureIncrementalLoad(job, table, locator)
    //data2.saveAsNewAPIHadoopFile("/bkld/data2/", classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2], job.getConfiguration)


    // Step 2: build the bulk-load utility and import the HFiles into the table.
    //val files: BulkLoadHFiles = BulkLoadHFiles.create(job.getConfiguration)
    //files.bulkLoad(tableName,new Path("/bkld/data2/"))


  }
}
