package com.zhao

import org.apache.commons.codec.digest.DigestUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.{HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import java.net.URI
import scala.collection.mutable.ListBuffer

/**
 * Bulk-inserts data into HBase from Spark using BulkLoad.
 *
 * BulkLoad works by first generating HFile files on HDFS via MapReduce, then
 * importing those HFiles directly into HBase regions — achieving efficient
 * batch insertion without going through the normal write path.
 *
 * Copyright (c) 2021, Zhao. A wet person does not fear the rain.
 * Date: 2021/2/4 15:43
 *
 * @author 柒柒
 * @version 1.0
 */

object insertWithBulkLoadWithMulti {

  /** HDFS staging directory where the intermediate HFiles are written. */
  private val StagingDir = "hdfs://node01:9000/test"

  def main(args: Array[String]): Unit = {

    // Silence Spark's verbose INFO logging on the console.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val sparkSession: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()

    val sc: SparkContext = sparkSession.sparkContext

    val tableName = "test"
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.quorum", "node01,node02,node03") // ZooKeeper quorum
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181") // ZooKeeper client port
    // NOTE(review): this is the old-API (mapred) constant; harmless here because
    // configureIncrementalLoad below configures the mapreduce job from the table itself.
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    val conn = ConnectionFactory.createConnection(hbaseConf)
    try {
      val admin = conn.getAdmin
      val table = conn.getTable(TableName.valueOf(tableName))
      try {
        val job = Job.getInstance(hbaseConf)
        // Output key/value types for the HFile writer.
        job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
        job.setMapOutputValueClass(classOf[KeyValue])
        job.setOutputFormatClass(classOf[HFileOutputFormat2])
        // Derives region boundaries from the live table so the generated HFiles
        // line up with the target regions.
        HFileOutputFormat2.configureIncrementalLoad(job, table, conn.getRegionLocator(TableName.valueOf(tableName)))

        // Input line format: rowkey,name,age. A 3-char md5 prefix salts the
        // rowkey to spread writes across regions.
        val rdd: RDD[(ImmutableBytesWritable, KeyValue)] = sc.textFile("a_data/a.txt")
          .map(_.split(","))
          .map(x => (DigestUtils.md5Hex(x(0)).substring(0, 3) + x(0), x(1), x(2)))
          .sortBy(_._1) // HFiles require rows in ascending rowkey order
          .flatMap(x => {
            val rowKey = Bytes.toBytes(x._1)
            val listBuffer = new ListBuffer[(ImmutableBytesWritable, KeyValue)]
            val kvName: KeyValue = new KeyValue(rowKey, Bytes.toBytes("cf1"), Bytes.toBytes("name"), Bytes.toBytes(x._2))
            val kvAge: KeyValue = new KeyValue(rowKey, Bytes.toBytes("cf1"), Bytes.toBytes("age"), Bytes.toBytes(x._3))
            // Within one row, cells must be emitted in column-name order:
            // "age" sorts before "name", so age goes first.
            listBuffer.append((new ImmutableBytesWritable(rowKey), kvAge))
            listBuffer.append((new ImmutableBytesWritable(rowKey), kvName))
            listBuffer
          })

        // Remove any stale output from a previous run: HFileOutputFormat2
        // refuses to write into an existing directory.
        isFileExist(StagingDir, sc)

        rdd.saveAsNewAPIHadoopFile(StagingDir, classOf[ImmutableBytesWritable],
          classOf[KeyValue], classOf[HFileOutputFormat2], job.getConfiguration)

        // Move the generated HFiles into the target table's regions.
        val bulkLoader: LoadIncrementalHFiles = new LoadIncrementalHFiles(hbaseConf)
        bulkLoader.doBulkLoad(new Path(StagingDir), admin, table,
          conn.getRegionLocator(TableName.valueOf(tableName)))
      } finally {
        // Close HBase handles even if the load fails.
        table.close()
        admin.close()
      }
    } finally {
      conn.close()
      sparkSession.stop()
    }
  }

  /**
   * Deletes `filePath` from HDFS if it already exists.
   *
   * Note: despite the name, this method deletes rather than merely checks;
   * the name is kept for compatibility with existing callers.
   *
   * @param filePath fully-qualified HDFS path (e.g. "hdfs://node01:9000/test")
   * @param sc       unused; retained so the signature stays source-compatible
   */
  def isFileExist(filePath: String, sc: SparkContext): Unit = {
    val output = new Path(filePath)
    val hdfs = FileSystem.get(new URI(filePath), new Configuration)

    if (hdfs.exists(output)) {
      // Recursive delete: the staging dir contains per-family subdirectories.
      hdfs.delete(output, true)
    }
  }
}


















