package com.hbase.handler.hfile.scala

import util.control.Breaks._
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.client.{ConnectionFactory, Result}
import org.apache.hadoop.hbase.{CellUtil, HBaseConfiguration, KeyValue, TableName}
import org.apache.hadoop.hbase.mapreduce.{HFileOutputFormat2, LoadIncrementalHFiles}
import HFilePartitionerHelper.HFilePartitioner


object ExportHbaseTableToHfile {

    /** Creates a fresh export handler with the default Spark app name. */
    def apply(): ExportHandler = new ExportHandler()

    /** Entry point: exports the sample table to a timestamped HDFS directory in local debug mode. */
    def main(args: Array[String]): Unit = {
        val tableName = "beidou:engine_matrix_info"
        val outputPath = s"/user/model/diven/test/hfile-${System.currentTimeMillis()}"
        apply().hbaseTableToHfile(tableName, outputPath, partitionNum = 3, isDebug = true)
    }
}


/**
 * Data export utility: dumps an HBase table to HFiles on HDFS so it can
 * later be bulk-loaded.
 *
 * @param sparkName Spark application name.
 */
class ExportHandler(sparkName: String = "SparkExportHandler") {

    /**
     * Exports an HBase table to HDFS in HFile format.
     *
     * @param hbaseTableName    source HBase table name
     * @param hfileSavePath     HDFS path where the HFiles are written
     * @param partitionNum      desired number of HFiles per region
     * @param isDebug           run Spark in local mode when true, otherwise on YARN
     */
    def hbaseTableToHfile(hbaseTableName: String, hfileSavePath: String, partitionNum: Int=5, isDebug:Boolean=false): Unit ={
        // Build the SparkContext. Kryo registration avoids expensive Java
        // serialization of the HBase writable types shuffled below.
        val sparkConf = new SparkConf()
        sparkConf.setAppName(sparkName).setMaster(if(isDebug) "local[*]" else "yarn")
        sparkConf.registerKryoClasses(Array[Class[_]](classOf[ImmutableBytesWritable], classOf[KeyValue]))
        val sparkContext = new SparkContext(sparkConf)
        try {
            // HBase scan configuration pointing at the source table.
            val hbaseConf: Configuration = HBaseConfiguration.create
            hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableInputFormat.INPUT_TABLE, hbaseTableName)
            // Read the table as an RDD of (rowkey, Result).
            val hbaseRdd: RDD[(ImmutableBytesWritable, Result)] = sparkContext.newAPIHadoopRDD(hbaseConf,
                classOf[org.apache.hadoop.hbase.mapreduce.TableInputFormat],
                classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
                classOf[org.apache.hadoop.hbase.client.Result])
            // Explode each Result into one KeyValue per cell.
            val keyValueRdd: RDD[(ImmutableBytesWritable, KeyValue)] = hbaseRdd.flatMap { case (_, result) =>
                result.rawCells().map { cell =>
                    (new ImmutableBytesWritable(result.getRow),
                        new KeyValue(result.getRow, CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), CellUtil.cloneValue(cell)))
                }
            }
            // Partition by the table's region boundaries so each output HFile maps to
            // one region, and sort within partitions as HFileOutputFormat2 requires.
            val hbaseConnection = ConnectionFactory.createConnection(hbaseConf)
            try {
                val regionLocator = hbaseConnection.getRegionLocator(TableName.valueOf(hbaseTableName))
                val hbasePartitioner = HFilePartitioner.apply(hbaseConf, regionLocator.getStartKeys, partitionNum)
                val partitionerRdd: RDD[(ImmutableBytesWritable, KeyValue)] = keyValueRdd.repartitionAndSortWithinPartitions(hbasePartitioner)
                // Write the sorted KeyValues to HDFS as HFiles.
                partitionerRdd.saveAsNewAPIHadoopFile(hfileSavePath, classOf[ImmutableBytesWritable], classOf[KeyValue], classOf[HFileOutputFormat2])
            } finally {
                // Fix: the HBase connection was previously leaked.
                hbaseConnection.close()
            }
        } finally {
            // Fix: the SparkContext was never stopped, leaking the application.
            sparkContext.stop()
        }
    }
}


/**
 * HFile partitioning helpers: map HBase row keys to Spark partitions aligned
 * with the table's region boundaries, so bulk-loaded HFiles fit regions.
 */
object HFilePartitionerHelper {

    /** Factory for region-aligned partitioners. */
    object HFilePartitioner {
        /**
         * Builds a partitioner over the given region start keys.
         *
         * @param conf   HBase configuration (bounds the files-per-region setting)
         * @param splits region start keys, one entry per region
         * @param numFilesPerRegionPerFamily desired number of HFiles per region
         */
        def apply(conf: Configuration, splits: Array[Array[Byte]], numFilesPerRegionPerFamily: Int): HFilePartitioner = {
            if (numFilesPerRegionPerFamily == 1)
                new SingleHFilePartitioner(splits)
            else {
                // Clamp to [1, hbase.mapreduce.bulkload.max.hfiles.perRegion.perFamily] (default 32).
                val fraction = 1 max numFilesPerRegionPerFamily min conf.getInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, 32)
                new MultiHFilePartitioner(splits, fraction)
            }
        }

    }

    /** Base partitioner: extracts the raw row-key bytes from the shuffle key. */
    protected abstract class HFilePartitioner extends Partitioner {
        def extractKey(n: Any): Array[Byte] = {
            n match {
                case kv: ImmutableBytesWritable => kv.get()
            }
        }
    }

    /** Splits each region's rows into `fraction` sub-partitions by key hash. */
    private class MultiHFilePartitioner(splits: Array[Array[Byte]], fraction: Int) extends HFilePartitioner {

        override def getPartition(key: Any): Int = {
            val keyData = extractKey(key)
            // BUG FIX: Array[Byte].hashCode() is identity-based (Java array hashCode),
            // so equal row keys hashed to different sub-partitions across objects,
            // JVMs, and task retries — a Spark Partitioner must be deterministic on
            // key content. Use HBase's content-based Bytes.hashCode instead.
            val hashCode = (Bytes.hashCode(keyData) & Int.MaxValue) % fraction
            // The row belongs to the region preceding the first start key that is
            // greater than it; rows beyond the last start key go to the last region.
            val i = splits.indexWhere(split => Bytes.compareTo(keyData, split) < 0, 1)
            val regionIdx = if (i < 0) splits.length - 1 else i - 1
            // Removed the per-record println: it ran once for every shuffled record.
            regionIdx * fraction + hashCode
        }

        override def numPartitions: Int = splits.length * fraction
    }

    /** One partition per region. */
    private class SingleHFilePartitioner(splits: Array[Array[Byte]]) extends HFilePartitioner {
        override def getPartition(key: Any): Int = {
            val k = extractKey(key)
            // First start key greater than the row key marks the next region.
            val i = splits.indexWhere(split => Bytes.compareTo(k, split) < 0, 1)
            if (i < 0) splits.length - 1 else i - 1
        }

        override def numPartitions: Int = splits.length
    }
}