package com.kingsoft.dc.khaos.module.spark.sink

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.extender.meta.model.ds.HbaseConnect
import com.kingsoft.dc.khaos.extender.meta.model.table.DmTable
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, HbaseConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink._
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.util.{CenterMetricUtils, DataframeUtils, FileUtils, MetaUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
import org.apache.hadoop.hbase.security.User
import org.apache.hadoop.hbase.spark._
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SerializableWritable, SparkFiles, sql}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types._
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random

/**
  * Created by haorenhui on 2019/06/13.
  */
class HbaseSink extends SinkStrategy with Logging with Serializable {

    // Target table name as "namespace:table", and the bare namespace.
    private var tableName: String = _
    private var namespace: String = _
    // Sink field list parsed from the module config ("family:qualifier" field names).
    private var columnInfoMetaList: List[ExtractFieldInfo] = Nil
    // Rowkey assembly rules from table metadata, plus optional random-suffix length (0 = none).
    private var rowkey_option: List[HBaseRKOption] = Nil
    private var rowkey_option_num: Int = 0
    // Parsed module config and the session used for all Spark operations.
    private var hbaseConfig: HBaseConfig = _
    private var sparkSession: SparkSession = _
    // Driver-side HBase client conf; shipped to executors via `broadcastedConf`.
    @transient private var hbaseConf: Configuration = _
    private var broadcastedConf: Broadcast[SerializableWritable[Configuration]] = _
    //  @transient val config: Configuration

    private val WRITE_OPTION_OVERWRITE = "overwrite"
    // Connection / kerberos settings, resolved in initMeta from the datasource metadata.
    private var HBASE_ZOOKEEPER_QUORUM: String = _
    private var HBASE_PRINCIPAL: String = _
    private var HBASE_KEYTABPATH: String = _
    private var HBASE_KRB5PATH: String = _
    private var USE_KERBEROS: Boolean = false
    // Flattened column metadata from the metadata service ("family:qualifier" names).
    private var columnEntiy: java.util.List[DmTableColumn] = _
    // BufferedMutator write buffer size in bytes (default 5 MB; overridable via config).
    private var WRITE_BUFFER_SIZE: Long = 5 * 1024 * 1024

    // Whether rowkey source columns are also stored as cells, and the rowkey join delimiter.
    private var ROWKEY_COL_IS_STORE: Boolean = true
    private var ROWKEY_DELIMETER: String = _
    // WAL durability applied to each Put (configurable via module.hbase.sink settings).
    private var WAL_WAY: Durability = Durability.USE_DEFAULT
    // Row-count report sent back to the data-status service after the write.
    private val hbaseDataStatusInfo = new RelationDataStatusInfo
    // Extra client properties (module.hbase.sink.original.*) copied verbatim into hbaseConf.
    private var hbaseProperties: Map[String, String] = Map[String, String]()
    // HDFS temp directory holding HFiles for the bulkload path.
    private var hFilePath: String = _

    /**
      * Sink entry point: parse the module config, then write the DataFrame to HBase.
      * Returns `this` so the caller can chain on the strategy instance.
      */
    override def sink(kc: KhaosContext,
                      module_id: String,
                      config: JObject,
                      schema: Schema,
                      dataFrame: DataFrame): this.type = {
        // Config/metadata first, then the actual write.
        init(kc, config, module_id)
        doWrite(kc, dataFrame)
        this
    }


    /**
      * Initialize the sink: extract the typed config, verify write permission,
      * then resolve metadata and client properties.
      */
    def init(kc: KhaosContext, config: JObject, module_id: String): Unit = {
        implicit val formats: DefaultFormats = DefaultFormats
        hbaseConfig = config.extract[HBaseConfig]

        // Fail fast when the caller lacks write permission on the target table.
        val authorized: Boolean = MetaUtils.checkWriteAuth(kc,
            hbaseConfig.db_name,
            hbaseConfig.table_name,
            hbaseConfig.extender.auth.clazz,
            compact(render(hbaseConfig.extender.auth.params)))
        if (!authorized) {
            log.error(s"hbase writer fail, 权限验证未通过")
            throw new Exception(s"hbase writer fail, 权限验证未通过")
        }

        sparkSession = kc.sparkSession
        namespace = hbaseConfig.db_name
        tableName = namespace + ":" + hbaseConfig.table_name
        columnInfoMetaList = hbaseConfig.extract_fields

        // Metadata (columns, rowkey rules, kerberos) before client tuning properties.
        initMeta(kc, module_id)
        initProperties(kc)
    }

    /**
      * Apply optional HBase client tuning from the job config: operation/RPC timeouts,
      * retry count, write buffer size, arbitrary pass-through properties, and WAL mode.
      * Falls back to defaults (already set on hbaseConf) when any read fails.
      */
    def initProperties(kc: KhaosContext): Unit = {
        try {
            val operation_timeout: String = kc.conf.getString(HbaseConstants.SINK_ORIGINAL_HBASE_CLIENT_OPERATION_TIMEOUT, HbaseConstants.DEFAULT_SINK_ORIGINAL_HBASE_CLIENT_OPERATION_TIMEOUT)
            val retries_number: String = kc.conf.getString(HbaseConstants.SINK_ORIGINAL_HBASE_CLIENT_RETRIES_NUMBER, HbaseConstants.DEFAULT_SINK_ORIGINAL_HBASE_CLIENT_RETRIES_NUMBER)
            val rpc_timeout: String = kc.conf.getString(HbaseConstants.SINK_ORIGINAL_HBASE_CLIENT_RPC_TIMEOUT, HbaseConstants.DEFAULT_SINK_ORIGINAL_HBASE_CLIENT_RPC_TIMEOUT)
            val write_buffer: String = kc.conf.getString(HbaseConstants.SINK_ORIGINAL_HBASE_CLIENT_WRITE_BUFFER, HbaseConstants.DEFAULT_SINK_ORIGINAL_HBASE_CLIENT_WRITE_BUFFER)
            hbaseConf.set(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, operation_timeout)
            hbaseConf.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries_number)
            hbaseConf.set(HConstants.HBASE_RPC_TIMEOUT_KEY, rpc_timeout)
            hbaseConf.set("hbase.client.write.buffer", write_buffer)

            // Any key under module.hbase.sink.original.* is passed straight to the client conf.
            hbaseProperties = kc.conf.getAllWithUnPrefix("module.hbase.sink.original.").toMap
            log.info("hbaseSink hbaseProperties")
            hbaseProperties.foreach { case (k, v) =>
                log.info(k + "   " + v)
                hbaseConf.set(k, v)
            }

            // The write buffer value is also reused as the BufferedMutator buffer size in doPut
            // (the "duplicate" set above configures the connection-level default).
            WRITE_BUFFER_SIZE = write_buffer.toLong
            WAL_WAY = kc.conf.getString(HbaseConstants.SINK_WALWAY, HbaseConstants.DEFAULT_SINK_WALWAY) match {
                case "USE_DEFAULT" => Durability.USE_DEFAULT
                case "SYNC_WAL" => Durability.SYNC_WAL
                case "SKIP_WAL" => Durability.SKIP_WAL
                case "FSYNC_WAL" => Durability.FSYNC_WAL
                case "ASYNC_WAL" => Durability.ASYNC_WAL
                case _ => Durability.USE_DEFAULT
            }
            log.info(s"hbase WAL_WAY $WAL_WAY")
        } catch {
            case e: Exception =>
                // Fix: the failure used to be swallowed without any detail, hiding config mistakes.
                log.error(s"未读取到hbase配置! 改用默认配置, 失败信息: ${e.getMessage}")
        }
    }

    /**
      * Resolve HBase metadata for the target table: flatten the nested column list,
      * read the rowkey assembly rules, build the client Configuration (including
      * kerberos material when the datasource requires it) and broadcast it.
      */
    def initMeta(kc: KhaosContext, module_id: String): Unit = {
        import scala.collection.JavaConverters._
        implicit val formats: DefaultFormats = DefaultFormats
        // Fetch table/datasource metadata from the metadata service.
        val entity: MetaDataEntity = MetaUtils.getHBaseMeta(kc,
            hbaseConfig.db_name,
            hbaseConfig.table_name,
            hbaseConfig.extender.meta.clazz,
            compact(render(hbaseConfig.extender.meta.params)),
            this)
        val connect: HbaseConnect = entity.dsHBaseConnect
        val dmTable: DmTable = entity.tableEntiy

        // The metadata service returns nested columns (family -> qualifiers);
        // flatten them into "family:qualifier" column names.
        columnEntiy = entity.columnEntiy.asScala.flatMap((dmCol: DmTableColumn) => {
            val family: String = dmCol.getColName
            dmCol.getCols.asScala.map((dm: DmTableColumn) => {
                dm.setColName(family + ":" + dm.getColName)
                dm
            })
        }).asJava

        // Rowkey assembly rules from the table params.
        dmTable.getParams.asScala.foreach(param => {
            param.get("pKey") match {
                case "ROWKEY_COLS" =>
                    rowkey_option = parse(param.get("pValue").toString, true: Boolean).extract[List[HBaseRKOption]]
                    // Attach each rowkey column's data type from the sink field list.
                    rowkey_option.foreach((rk: HBaseRKOption) =>
                        columnInfoMetaList.find((col: ExtractFieldInfo) => rk.colName.equals(col.field))
                            .foreach((col: ExtractFieldInfo) => rk.data_type = Option(col.data_type)))

                case "ROWKEY_RANDOM_NUMBER_LENGTH" =>
                    if (param.get("pValue").toString.nonEmpty)
                        rowkey_option_num = param.get("pValue").toString.toInt

                case _ =>
            }
        })

        // Rowkey source columns are always stored and joined with an empty delimiter.
        // (An unreachable `if (false)` branch that flipped these values was removed.)
        ROWKEY_COL_IS_STORE = true
        ROWKEY_DELIMETER = ""

        hbaseConf = HBaseConfiguration.create()
        // Built-in HBase: pick up the cluster's own hbase-site.xml.
        if (entity.getDatasourceEntiy.isDefaultDs) {
            hbaseConf.addResource(kc.conf.getString("env.HADOOP_CONF_DIR") + "/hbase-site.xml")
        }
        // External HBase: zookeeper quorum / znode parent come from the datasource.
        HBASE_ZOOKEEPER_QUORUM = connect.getZkAddresses
        val zookeeper_znode_parent: String = connect.getZookeeperZnodeParent
        hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, HBASE_ZOOKEEPER_QUORUM)
        hbaseConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zookeeper_znode_parent)

        log.info("hbase sink kerberos status : " + connect.getUseKrbs)
        if (connect.getUseKrbs.toBoolean) {
            USE_KERBEROS = true

            // keytab / krb5 files arrive base64-encoded from the metadata service.
            HBASE_PRINCIPAL = connect.getPrincipal
            val keytabFile: String = connect.getKeytabFile
            val krb5File: String = connect.getKrb5File

            // Default realm; overridden when HBASE_PRINCIPAL carries one ("user@REALM").
            var realm = "HADOOP.COM"
            val prinsArr: Array[String] = HBASE_PRINCIPAL.split("@")
            if (prinsArr.length >= 2) {
                realm = prinsArr(1)
            }
            hbaseConf.set("hbase.security.authentication", "kerberos")
            hbaseConf.set("hadoop.security.authentication", "kerberos")
            hbaseConf.set("hbase.master.kerberos.principal", "hbase/_HOST@" + realm)
            hbaseConf.set("hbase.regionserver.kerberos.principal", "hbase/_HOST@" + realm)

            // module_id in the file names lets one job write to several HBase clusters.
            val keytabPath: String = System.getenv("SPARK_YARN_STAGING_DIR") + "/hbase_sink_" + module_id + ".keytab"
            val krb5Path: String = System.getenv("SPARK_YARN_STAGING_DIR") + "/krb5_" + module_id + ".conf"
            FileUtils.decoderBase64File(keytabFile, keytabPath, FileSystem.newInstance(new Configuration()))
            FileUtils.decoderBase64File(krb5File, krb5Path, FileSystem.newInstance(new Configuration()))

            HBASE_KEYTABPATH = keytabPath
            HBASE_KRB5PATH = krb5Path

            log.info("hbase 开启kerberos")
            log.info("==>user " + HBASE_PRINCIPAL)
            log.info("==>keytab " + HBASE_KEYTABPATH)
            log.info("==>krb5 " + HBASE_KRB5PATH)

            // Ship credentials to the executors so each partition can log in (see doPut).
            kc.sparkSession.sparkContext.addFile(HBASE_KRB5PATH)
            kc.sparkSession.sparkContext.addFile(HBASE_KEYTABPATH)
        }
        hFilePath = System.getenv("SPARK_YARN_STAGING_DIR") + s"/hbase_sink_${module_id}_bulkLoad_tmp/"
        broadcastedConf = sparkSession.sparkContext.broadcast(new SerializableWritable(hbaseConf))
    }


    /**
      * Convert the DataFrame for HBase, dispatch to the configured write mode
      * (put / bulkload), then report the row count and operations-center metrics.
      */
    def doWrite(kc: KhaosContext, data: DataFrame): Any = {
        // Column defaults, HBase type conversion, timestamp->string, then repartition.
        var convertDataFrame: DataFrame = DataframeUtils.setDefaultValue(columnInfoMetaList, columnEntiy, data)
        convertDataFrame = DataframeUtils.convertDataType4HBase(columnInfoMetaList, convertDataFrame)
        convertDataFrame = convertTimestamp(columnInfoMetaList, convertDataFrame)
        val partitionNum: Int = DataframeUtils.rePartitions(kc, convertDataFrame, columnEntiy)
        convertDataFrame = DataframeUtils.repartionDataframe(convertDataFrame, partitionNum, this)
        val writeMode = hbaseConfig.write_option.get.trim.toLowerCase
        // calculateData wraps the frame so `accumulator` counts rows as they are consumed.
        val (calculateData, accumulator): (DataFrame, LongAccumulator) = DataframeUtils.calculateDataNum(kc, convertDataFrame, "HBaseSink")

        try {
            writeMode match {
                // Bug fix: put mode previously consumed convertDataFrame, so the counting
                // wrapper was never evaluated and the count reported below was always 0.
                case "put" => doPut(kc, calculateData)
                // The Put RDD is now built only for bulkload (it was built for every mode).
                case "bulkload" => doBulkLoad(kc, convertBulkLoadRDD(calculateData), hFilePath)
                case _ => log.error(s"hbase writer暂不支持该写入方式:${writeMode}") // message fix: 改 -> 该
            }
        } catch {
            case e: Exception =>
                e.printStackTrace()
                log.error(s"hbase writer put fail,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}")
                throw new Exception(s"hbase writer put fail,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}", e)

        }

        // Report row count to the data-status service.
        hbaseDataStatusInfo.setDataNum(String.valueOf(accumulator.value))
        DataframeUtils.reportDataStatusRelation(kc, hbaseDataStatusInfo, namespace, hbaseConfig.table_name,
            hbaseConfig.extender.meta.clazz, compact(render(hbaseConfig.extender.meta.params)))
        // Report processed-row metric to the operations center.
        val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
        metric.setProcessDataLValue(hbaseDataStatusInfo.getDataNum.toLong)
        CenterMetricUtils.reportSyncProcessData(metric, kc)
    }

    /**
      * Write rows via HBase Put using one Connection + BufferedMutator per partition.
      * Kerberos login (when enabled) happens on each executor from files shipped
      * with sparkContext.addFile in initMeta.
      */
    def doPut(kc: KhaosContext, data: DataFrame): Unit = {
        val accumulator: LongAccumulator = kc.sparkSession.sparkContext.longAccumulator("DataNumber" + "_" + "HBaseSink" + "_" + Random.nextInt(1000))

        // Hoisted out of the per-row loop: which columns to drop does not depend on
        // the row (the original re-filtered columnInfoMetaList for every single row).
        if (!ROWKEY_COL_IS_STORE) {
            val rkOptionNameList: List[String] = rowkey_option.map((rkOption: HBaseRKOption) => rkOption.colName)
            columnInfoMetaList = columnInfoMetaList.filterNot((column: ExtractFieldInfo) => rkOptionNameList.contains(column.field))
        }

        data.foreachPartition((iter: Iterator[sql.Row]) => {
            var hbaseConn: Connection = null
            var mutator: BufferedMutator = null
            try {
                if (USE_KERBEROS) {
                    // Resolve the shipped keytab/krb5 files on the executor.
                    val keytabPath: String = SparkFiles.get(HBASE_KEYTABPATH.split("/").last)
                    val krpath: String = SparkFiles.get(HBASE_KRB5PATH.split("/").last)

                    System.setProperty("java.security.krb5.conf", krpath)
                    UserGroupInformation.setConfiguration(getConf)
                    UserGroupInformation.loginUserFromKeytab(HBASE_PRINCIPAL, keytabPath)

                    log.info(s"==> hbase sink user $HBASE_PRINCIPAL 登录成功")
                    val loginedUser: User = User.create(UserGroupInformation.getLoginUser)
                    hbaseConn = ConnectionFactory.createConnection(getConf, loginedUser)
                } else {
                    hbaseConn = ConnectionFactory.createConnection(getConf)
                }

                // Batched async writer; replaces the old hTable.setAutoFlush(false, false).
                val params: BufferedMutatorParams = new BufferedMutatorParams(TableName.valueOf(tableName)).writeBufferSize(WRITE_BUFFER_SIZE)
                mutator = hbaseConn.getBufferedMutator(params)

                iter.foreach((row: sql.Row) => {
                    // Build the rowkey from the configured columns.
                    var rowkeyStr: String = rowkey_option.map((option: HBaseRKOption) => {
                        val value: String = getColumnValue2(row, option.colName, option.data_type.get, option.`type`)
                        option.`type` match {
                            case "reverse" => value.reverse
                            case _ => value
                        }
                    }).mkString(ROWKEY_DELIMETER)

                    // Optional random suffix to spread hot keys across regions.
                    if (rowkey_option_num != 0) {
                        rowkeyStr += ROWKEY_DELIMETER + generateRandomNum(rowkey_option_num)
                    }

                    val put = new Put(Bytes.toBytes(rowkeyStr))
                    var flag = false

                    for (i <- columnInfoMetaList.indices) {
                        val columnMeta: ExtractFieldInfo = columnInfoMetaList(i)
                        val colName: String = columnMeta.field
                        val nameArr: Array[String] = colName.split(":", -1)
                        val family: Array[Byte] = Bytes.toBytes(nameArr(0).trim)
                        val qualifier: Array[Byte] = Bytes.toBytes(nameArr(1).trim)

                        val value: Array[Byte] = getColumnValue(row, colName, columnMeta.data_type.toUpperCase())
                        if (value != null) {
                            flag = true
                            put.addColumn(family, qualifier, value).setDurability(WAL_WAY)
                        }
                    }
                    // Skip rows whose every column was null.
                    if (flag) {
                        mutator.mutate(put)
                        accumulator.add(1)
                    }
                })
                // One flush per partition is enough; mutate() buffers internally.
                mutator.flush()
            } finally {
                // Leak fix: release the connection even when a row in the partition fails.
                closeConn(hbaseConn, mutator)
            }
        })
        hbaseDataStatusInfo.setDataNum(accumulator.value.toString)
    }

    /**
      * Write to HBase via bulk load: render the RDD into HFiles under a temp dir,
      * then hand the HFiles to the region servers with LoadIncrementalHFiles.
      * The temp dir is deleted and the connection closed in all cases.
      *
      * @param kc        khaos context
      * @param rdd       data to write (one Put per row)
      * @param hFilePath temporary HFile directory on HDFS
      * @return void
      */
    def doBulkLoad(kc: KhaosContext, rdd: RDD[Put], hFilePath: String): Unit = {
        var hbaseConn: Connection = null
        var admin: Admin = null
        var table: Table = null
        try {
            val hbaseContext = new HBaseContext(kc.sparkSession.sparkContext, hbaseConf)
            // Remove any leftover temp dir from a previous failed run before writing.
            log.info(s"hbase sink bulkLoad tmp path : $hFilePath")
            deleteTempFolder(kc.sparkSession.sparkContext.hadoopConfiguration, hFilePath)
            log.info("hbase sink write hfile start")
            //      hbaseContext.bulkLoadThinRows(rdd, TableName.valueOf(tableName), (t: Put) => putForLoad(t), hFilePath)
            hbaseContext.bulkLoad[Put](rdd, TableName.valueOf(tableName), (t: Put) => putForLoad(t), hFilePath)
            log.info("hbase sink write hfile end")
            if (USE_KERBEROS) {
                // Driver-side kerberos login before the load step.
                // NOTE(review): resolves the keytab via SparkFiles on the driver — confirm the
                // addFile'd path is visible here; also this uses the 3-arg createConnection
                // (pool = null) while doPut uses the 2-arg form — confirm intentional.
                val keytabPath: String = SparkFiles.get(HBASE_KEYTABPATH.split("/").last)
                UserGroupInformation.setConfiguration(hbaseConf)
                UserGroupInformation.loginUserFromKeytab(HBASE_PRINCIPAL, keytabPath)
                val loginedUser: User = User.create(UserGroupInformation.getLoginUser)
                hbaseConn = ConnectionFactory.createConnection(hbaseConf, null, loginedUser)
            } else {
                hbaseConn = ConnectionFactory.createConnection(hbaseConf)
            }
            admin = hbaseConn.getAdmin
            table = hbaseConn.getTable(TableName.valueOf(tableName))
            val load = new LoadIncrementalHFiles(hbaseConf)
            log.info("hbase sink load hfile start")
            load.doBulkLoad(new Path(hFilePath), admin, table, hbaseConn.getRegionLocator(TableName.valueOf(tableName)))
            log.info("hbase sink load hfile end")
        } catch {
            case e: Exception =>
                e.printStackTrace()
                log.error(s"hbase writer bulkLoad fail,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}")
                throw new Exception(s"hbase writer bulkLoad fail,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}", e)
        } finally {
            // Best-effort cleanup: temp dir first, then client handles; never mask the
            // original exception with a cleanup failure.
            try {
                deleteTempFolder(kc.sparkSession.sparkContext.hadoopConfiguration, hFilePath)
            } catch {
                case e: Exception =>
                    log.error(s"delete bulkLoad tmp file fail,${e.getCause}")
            }
            try {
                closeConn(hbaseConn, admin, table)
            } catch {
                case e: Exception =>
                    log.error(s"close conn fail,${e.getCause}")
            }
        }
    }


    /**
      * Map the DataFrame into an RDD of Put objects for the bulkload path.
      * Null cells are skipped; rowkey generation follows the metadata rules.
      */
    def convertBulkLoadRDD(data: DataFrame): RDD[Put] = {
        data.rdd.mapPartitions { rows =>
            rows.map { row =>
                val put = new Put(getRowKey(row))
                columnInfoMetaList.foreach { meta =>
                    // Field names are "family:qualifier"; split once and trim both parts.
                    val parts: Array[String] = meta.field.split(":", -1)
                    val cellValue: Array[Byte] = getColumnValue(row, meta.field, meta.data_type.toUpperCase())
                    if (cellValue != null) {
                        put.addColumn(Bytes.toBytes(parts(0).trim), Bytes.toBytes(parts(1).trim), cellValue)
                    }
                }
                put
            }
        }
    }

    /**
      * Explode a Put into (rowkey/family/qualifier, value) pairs, the shape
      * HBaseContext.bulkLoad expects for HFile generation.
      * Uses explicit scala.collection.JavaConverters instead of the deprecated
      * implicit scala.collection.JavaConversions the original relied on.
      */
    def putForLoad(put: Put): Iterator[(KeyFamilyQualifier, Array[Byte])] = {
        import scala.collection.JavaConverters._
        put.getFamilyCellMap.entrySet().asScala.iterator.flatMap(cells => {
            val family: Array[Byte] = cells.getKey
            cells.getValue.asScala.iterator.map(cell =>
                (new KeyFamilyQualifier(CellUtil.cloneRow(cell), family, CellUtil.cloneQualifier(cell)),
                    CellUtil.cloneValue(cell)))
        })
    }

    /**
      * Truncate (clear) the target table: disable if needed, truncate without
      * preserving region splits, then re-enable if it was left disabled.
      */
    def truncateTable(): Unit = {
        var hbaseConn: Connection = null
        var admin: Admin = null
        try {
            hbaseConn = ConnectionFactory.createConnection(getConf)
            admin = hbaseConn.getAdmin

            val target = TableName.valueOf(tableName)
            // truncateTable requires the table to be disabled first.
            if (!admin.isTableDisabled(target)) {
                admin.disableTable(target)
            }
            admin.truncateTable(target, false) // false: do not preserve region splits
            if (admin.isTableDisabled(target)) {
                admin.enableTable(target)
            }
        } catch {
            case e: Exception =>
                e.printStackTrace()
                // Message fix: the old interpolation embedded literal `" + ` fragments
                // in the emitted text; build the message once and reuse it.
                val msg: String =
                    s"""
                       |hbase writer write_option is ${hbaseConfig.write_option.getOrElse("")},
                       |truncate table fail,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}
                     """.stripMargin
                log.error(msg)
                throw new Exception(msg, e)

        } finally {
            closeConn(hbaseConn, admin)
        }
    }

    /** Unwrap the broadcast HBase Configuration; rethrows with context on failure. */
    def getConf: Configuration = {
        try {
            broadcastedConf.value.value
        } catch {
            case e: Exception =>
                e.printStackTrace()
                log.error(s"hbase writer Unable to getConfig from broadcast,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}")
                throw new Exception(s"hbase writer Unable to getConfig from broadcast,失败信息: ${e.getMessage}, 失败原因: ${e.getCause}", e)
        }
    }

    /**
      * Read a column from the Row as the byte[] to store in HBase, or null when
      * the cell is null (callers skip null cells).
      */
    def getColumnValue(row: sql.Row, column_name: String, data_type: String): Array[Byte] = {
        // Bug fix: the old code compared primitive reads (getAs[Long]) to null, which is
        // always "not null" after unboxing — a null NUMBER cell was silently written as 0.
        // Check nullness on the row itself before typed extraction.
        if (row.isNullAt(row.fieldIndex(column_name))) {
            null
        } else data_type match {
            case ColumnType.STRING => Bytes.toBytes(row.getAs[String](column_name))
            case ColumnType.NUMBER => Bytes.toBytes(row.getAs[Long](column_name))
            case ColumnType.DATE => Bytes.toBytes(String.valueOf(row.getAs[Any](column_name)))
            case ColumnType.DECIMAL => Bytes.toBytes(java.lang.Double.valueOf(row.getAs[String](column_name)))
            case ColumnType.TIME => Bytes.toBytes(row.getAs[String](column_name))
            case ColumnType.DATETIME => Bytes.toBytes(row.getAs[String](column_name))
            case _ => Bytes.toBytes(String.valueOf(row.getAs[Any](column_name)))
        }
    }


    /**
      * Read a column from the Row as a String (used for rowkey assembly);
      * a null cell yields the "null" placeholder. `option` is kept for interface
      * compatibility (the reverse transform is applied by the caller).
      */
    def getColumnValue2(row: sql.Row, column_name: String, data_type: String, option: String): String = {
        // Bug fix: same primitive-vs-null issue as getColumnValue — a null NUMBER used
        // to surface as "0" instead of the "null" placeholder all other types produce.
        if (row.isNullAt(row.fieldIndex(column_name))) {
            "null"
        } else data_type match {
            case ColumnType.NUMBER => String.valueOf(row.getAs[Long](column_name))
            // STRING / DATE / DECIMAL / TIME / DATETIME and unknown types all reduce to
            // String.valueOf of the raw value, matching the original branch-by-branch output.
            case _ => String.valueOf(row.getAs[Any](column_name))
        }
    }

    /**
      * Cast every DATETIME column in the sink schema to String so it serializes cleanly.
      * Bug fix: the original ignored the `sinkSchema` parameter and iterated the
      * `columnInfoMetaList` field instead; all call sites pass that same list, so
      * behavior is unchanged while the signature now means what it says.
      */
    def convertTimestamp(sinkSchema: List[ExtractFieldInfo], data: DataFrame): DataFrame = {
        sinkSchema.foldLeft(data) { (df, entity) =>
            entity.data_type match {
                case ColumnType.DATETIME => df.withColumn(entity.field, df.col(entity.field).cast(StringType))
                case _ => df
            }
        }
    }

    /**
      * Build the rowkey bytes per the metadata rules. Consistency fix: now joins
      * parts with ROWKEY_DELIMETER and prefixes the random suffix with it, matching
      * the inline rowkey assembly in doPut (the delimiter is "" today, so the
      * produced bytes are identical).
      */
    def getRowKey(row: Row): Array[Byte] = {
        var rowkeyStr: String = rowkey_option.map(option => {
            val value: String = getColumnValue2(row, option.colName, option.data_type.get, option.`type`)
            option.`type` match {
                case "reverse" => value.reverse
                case _ => value
            }
        }).mkString(ROWKEY_DELIMETER)

        // Optional random suffix to spread hot keys across regions.
        if (rowkey_option_num != 0) {
            rowkeyStr += ROWKEY_DELIMETER + generateRandomNum(rowkey_option_num)
        }

        Bytes.toBytes(rowkeyStr)
    }

    /** Recursively delete the bulkload temp folder if it exists. */
    def deleteTempFolder(hadoopConf: Configuration, hFilePath: String): Unit = {
        val target = new Path(hFilePath)
        val fileSystem: FileSystem = FileSystem.get(hadoopConf)
        if (fileSystem.exists(target)) {
            fileSystem.delete(target, true)
        }
    }

    /** Generate a random numeric string with the given number of decimal digits. */
    def generateRandomNum(length: Int): String = {
        // Bug fix: Random.nextInt(9) yields 0-8 (exclusive upper bound), so the digit 9
        // could never appear; nextInt(10) covers the full decimal digit range.
        Seq.fill(length)(Random.nextInt(10)).mkString("")
    }

    /** Close admin then connection, tolerating nulls and already-closed handles. */
    def closeConn(conn: Connection, admin: Admin): Unit = {
        Option(admin).filterNot(_.isAborted).foreach(_.close())
        Option(conn).filterNot(_.isClosed).foreach(_.close())
        log.info(s"hbase writer conn && admin close")
    }

    /** Close mutator (flushing pending writes) then connection, tolerating nulls. */
    def closeConn(conn: Connection, mutator: BufferedMutator): Unit = {
        Option(mutator).foreach(_.close())
        Option(conn).filterNot(_.isClosed).foreach(_.close())
        log.info(s"hbase writer conn && mutator close")
    }

    /** Close admin, table, then connection, tolerating nulls and closed handles. */
    def closeConn(conn: Connection, admin: Admin, table: Table = null): Unit = {
        Option(admin).filterNot(_.isAborted).foreach(_.close())
        Option(table).foreach(_.close())
        Option(conn).filterNot(_.isClosed).foreach(_.close())
        log.info(s"hbase writer conn && admin && table close")
    }
}
