package com.shujia.onhbase

import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles}
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue}
import org.apache.spark.{SparkConf, SparkContext}

object SparkToHBaseBuikLoading {

  /**
   * Bulk-loads student data from HDFS into the HBase table "student2".
   *
   * Reads CSV lines from /data/student/ (field 0 = row key, field 1 = name),
   * sorts them by row key, writes HFiles to /data/spark/hfile via
   * HFileOutputFormat2, then hands the HFiles to LoadIncrementalHFiles,
   * which moves them directly into the table's region directories —
   * bypassing the HBase write path entirely.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("app")
    //      .setMaster("local")  // uncomment for local testing

    val sc = new SparkContext(conf)
    try {
      val config = HBaseConfiguration.create
      config.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181")

      sc.textFile("/data/student/")
        .map(_.split(","))
        // Skip malformed lines instead of failing the whole job with
        // ArrayIndexOutOfBoundsException on records missing a field.
        .filter(_.length >= 2)
        .map(fields => (fields(0), fields(1)))
        // HFile generation requires row keys in lexicographic order.
        .sortBy(_._1)
        .map { case (rowKey, name) =>
          val kv = new KeyValue(rowKey.getBytes, "info".getBytes, "name".getBytes, name.getBytes)
          (new ImmutableBytesWritable(kv.getKey), kv)
        }
        .saveAsNewAPIHadoopFile(
          "/data/spark/hfile",
          classOf[ImmutableBytesWritable],
          classOf[KeyValue],
          classOf[HFileOutputFormat2],
          config)

      // Move the generated HFiles into the HBase table's directories.
      val bulkLoader = new LoadIncrementalHFiles(config)
      val table = new HTable(config, "student2")
      try {
        bulkLoader.doBulkLoad(new Path("/data/spark/hfile"), table)
      } finally {
        table.close() // release the HBase connection even if the load fails
      }
    } finally {
      sc.stop() // always release cluster resources held by the SparkContext
    }
  }
}
