package com.kingsoft.dc.khaos.module.spark.sink

import java.io.{BufferedReader, InputStreamReader}
import java.util.UUID

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, CommonConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.{ExtractFieldInfo, PhoenixConfig}
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.util.{CenterMetricUtils, DataframeUtils, FileUtils, MetaUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.HConstants
import org.apache.spark.{SerializableWritable, SparkFiles}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, render}
//import org.apache.phoenix.spark._

/**
  * Sink strategy that writes a DataFrame into Apache Phoenix, either through the
  * phoenix-spark JDBC connector (writeMode = "jdbc", the default) or through an
  * external MapReduce bulk-load shell script (writeMode = "bulkLoad").
  *
  * @Author: Chris J
  * @Date: 2020-09-02.
  */
class PhoenixSink extends SinkStrategy with Serializable {

  private var phoenixConfig: PhoenixConfig = _
  private var sparkSession: SparkSession = _
  private var nameSpace: String = _
  private var tableName: String = _
  // Fully quoted "schema"."table" identifier for the JDBC writer. Phoenix
  // upper-cases unquoted identifiers, so quoting preserves the original case.
  private var jdbcTableName: String = _
  private var bulkLoadTableName: String = _
  private var bulkLoadSchemaName: String = _
  private var columnInfoMetaList: List[ExtractFieldInfo] = Nil
  private var columnEntiy: java.util.List[DmTableColumn] = null
  private val FORMAT: String = "org.apache.phoenix.spark"
  private var HBASE_ZOOKEEPER_QUORUM: String = _
  private var ZOOKEEPER_ZNODE_PARENT: String = _
  private var zkUrl: String = _
  private var SAVE_MODE: String = "append"
  // Effective write parallelism; 0 means "not requested by the user".
  private var writeParall: Int = 0

  @transient private var phoenixConf: Configuration = _
  private var broadcastedConf: Broadcast[SerializableWritable[Configuration]] = _
  private val phoenixDataStatusInfo = new RelationDataStatusInfo
  // Staging directory where CSV data is dumped for the bulk-load path.
  private var hfileDataPath: String = _
  private var shellZkUrl: String = _
  private var shouldProxy: String = _
  private var proxyUser: String = _
  private var resourceQueue: String = _
  private var writeMode: String = "jdbc" // default: write via JDBC

  /**
    * Entry point of the sink: initialises configuration/metadata, then writes
    * the DataFrame into Phoenix.
    */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {
    init(kc, config)
    doWrite(kc, dataFrame)
    this
  }

  /**
    * Parses the sink config, verifies write permission on the target table and
    * prepares quoted table identifiers plus the optional write parallelism.
    *
    * @throws Exception when the write-permission check fails
    */
  def init(kc: KhaosContext, config: JObject): Unit = {
    implicit val formats = DefaultFormats
    phoenixConfig = config.extract[PhoenixConfig]
    // Permission check
    val checkResult = MetaUtils.checkWriteAuth(kc,
      phoenixConfig.db_name,
      phoenixConfig.table_name,
      phoenixConfig.extender.auth.clazz,
      compact(render(phoenixConfig.extender.auth.params)))

    if (!checkResult) {
      log.error(s"phoenix writer fail, 权限验证未通过")
      throw new Exception(s"phoenix writer fail, 权限验证未通过")
    }

    sparkSession = kc.sparkSession

    nameSpace = phoenixConfig.db_name
    tableName = phoenixConfig.table_name

    jdbcTableName = "\"" + nameSpace + "\".\"" + tableName + "\""

    bulkLoadSchemaName = "\"" + phoenixConfig.db_name + "\""
    bulkLoadTableName = "\"" + phoenixConfig.table_name + "\""

    // Optional user-requested parallelism, only honoured when its switch is on.
    // FIX: the original called `.get` on the Option and then null-checked the
    // result, which throws NoSuchElementException when the option is absent.
    phoenixConfig.advanced_options.parallelism.foreach { phoenixWriteParall =>
      if (phoenixWriteParall != null && phoenixWriteParall.on_off.getOrElse(false)) {
        writeParall = phoenixWriteParall.write_parall.getOrElse(-1)
      }
    }
    columnInfoMetaList = phoenixConfig.extract_fields

    initMeta(kc)
  }

  /**
    * Fetches Phoenix metadata (columns + connection info), materialises
    * Kerberos keytab/krb5 files when needed, and builds the JDBC / shell
    * ZooKeeper URLs used by the writers.
    */
  def initMeta(kc: KhaosContext): Unit = {
    implicit val formats = DefaultFormats
    // Fetch table metadata from the meta extender
    val entity: MetaDataEntity = MetaUtils.getPhoenixMeta(kc,
      phoenixConfig.db_name,
      phoenixConfig.table_name,
      phoenixConfig.extender.meta.clazz,
      compact(render(phoenixConfig.extender.meta.params)),
      this)
    val connect = entity.dsPhoenixConnect
    columnEntiy = entity.columnEntiy

    phoenixConf = new Configuration()
    HBASE_ZOOKEEPER_QUORUM = connect.getZkAddresses
    ZOOKEEPER_ZNODE_PARENT = connect.getZookeeper_znode_parent

    shouldProxy = kc.conf.getString(SchedulerConstants.SHOULD_PROXY)
    proxyUser = kc.conf.getString(SchedulerConstants.PROXY_USER)
    resourceQueue = kc.conf.getString(SchedulerConstants.DTS_RESOURCE_GROUP)
    // Per-run unique staging path under the YARN staging dir.
    hfileDataPath = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + UUID.randomUUID.toString + "/data/"
    val useKrbs = connect.getUseKrbs.toBoolean

    var krbs = ""
    if (useKrbs) {
      val principal = connect.getPrincipal
      var keytabFile: String = connect.getKeytabFile
      val krb5File: String = connect.getKrb5File
      // Decode the base64-embedded keytab/krb5 files to a unique /tmp path on
      // the default FileSystem, then distribute the keytab via addFile so that
      // executors can resolve it through SparkFiles.
      val tmpPath = UUID.randomUUID.toString
      var keytabPath: String = s"/tmp/$tmpPath/hbase_sink.keytab"
      val krb5Path: String = s"/tmp/$tmpPath/krb5.conf"

      FileUtils.decoderBase64File(keytabFile, keytabPath, FileSystem.get(sparkSession.sparkContext.hadoopConfiguration))
      FileUtils.decoderBase64File(krb5File, krb5Path, FileSystem.get(sparkSession.sparkContext.hadoopConfiguration))

      kc.sparkSession.sparkContext.addFile(s"hdfs://$keytabPath")
      // Resolve the locally distributed copy of the keytab.
      keytabPath = SparkFiles.get(keytabPath.split("/").last)
      val rootDir = SparkFiles.getRootDirectory()
      keytabFile = s"$rootDir/hbase_sink.keytab"
      log.info(s"spark driver tmp: $rootDir")
      log.info(s"=> principal: $principal")
      log.info(s"=> krb5Path: $krb5Path")
      log.info(s"=> keytabPath: $keytabPath")
      // Phoenix JDBC URL suffix: ":principal:keytab"
      krbs = s":$principal:$keytabPath"
    }
    // Enterprise-cloud 2.0 Phoenix datasources already carry the :2181 port in
    // the configured ZooKeeper address; only append it when it is missing.
    if (HBASE_ZOOKEEPER_QUORUM.contains(":")) {
      zkUrl = s"jdbc:phoenix:$HBASE_ZOOKEEPER_QUORUM:$ZOOKEEPER_ZNODE_PARENT$krbs"
      shellZkUrl = s"$HBASE_ZOOKEEPER_QUORUM:$ZOOKEEPER_ZNODE_PARENT"
    } else {
      zkUrl = s"jdbc:phoenix:$HBASE_ZOOKEEPER_QUORUM:2181:$ZOOKEEPER_ZNODE_PARENT$krbs"
      shellZkUrl = s"$HBASE_ZOOKEEPER_QUORUM:2181:$ZOOKEEPER_ZNODE_PARENT"
    }

    log.info(s"==> zkUrl: $zkUrl")
    log.info(s"==> shellZkUrl: $shellZkUrl")
  }


  /**
    * Converts the DataFrame to the Phoenix column layout, repartitions it so
    * that a single task stays under Phoenix's mutation limits, writes it out
    * (JDBC or bulk load), and finally reports the written row count to the
    * data-status and metric services.
    */
  def doWrite(kc: KhaosContext, df: DataFrame): Any = {
    val decimalSwitch = kc.conf.getBoolean(CommonConstants.PHOENIX_DECIMAL_SWITCH, false)
    var convertDataFrame: DataFrame = DataframeUtils.setDefaultValue(columnInfoMetaList, columnEntiy, df)
    convertDataFrame = DataframeUtils.convertDataType4Phoenix(columnInfoMetaList, convertDataFrame, decimalSwitch)
    convertDataFrame = DataframeUtils.sortDataCol(convertDataFrame, columnEntiy)


    if (writeMode == "jdbc") {
      val phoenixConf = new Configuration()
      // Phoenix per-commit limits: max mutation rows and max mutation bytes.
      val maxSize = phoenixConf.getLong("phoenix.mutate.maxSize", 500000)
      val maxSizeMB = phoenixConf.getLong("phoenix.mutate.maxSizeBytes", 104857600) / 1024 / 1024
      log.info(s"==> maxSize: $maxSize, maxSizeMB: $maxSizeMB")

      // Estimate (rowCount, sizeMB) and derive the minimum number of tasks so
      // that no single task exceeds either limit.
      val dfSize = DataframeUtils.estimateDFSize(convertDataFrame, columnInfoMetaList)
      val perTaskMaxSize = (dfSize._1 / maxSize).toInt
      val perTaskBatchSize = (dfSize._2 / maxSizeMB).toInt
      var minTaskSize = if (perTaskMaxSize > perTaskBatchSize) perTaskMaxSize else perTaskBatchSize
      // If one task can handle the whole volume, skip the repartition.
      val isRepartition = if (minTaskSize == 0) false else true
      // Avoid repartition(0); the integer division rounds down, so +1 keeps a
      // task's share below the commit limits.
      minTaskSize = if (minTaskSize == 0) 1 else minTaskSize + 1

      // The effective parallelism must be at least minTaskSize so that a single
      // task never submits more than the default commit limits.
      if (isRepartition || writeParall > 0) {
        log.info("==> phoenix user set writeParall: " + writeParall.toString)
        writeParall = if (minTaskSize > writeParall) minTaskSize else writeParall
        log.info("==> phoenix final writeParall: " + writeParall.toString)
        convertDataFrame = convertDataFrame.repartition(writeParall)
      }
    }


    var (tmpDf, accumulator): (DataFrame, LongAccumulator) = DataframeUtils.calculateDataNum(kc, convertDataFrame, "phoenixSink")

    // Phoenix is case sensitive: unquoted identifiers are upper-cased on
    // submit, so quote every schema field name before writing.
    printDFSchema(tmpDf, "before")
    val cols = tmpDf.schema.map(c => {
      "\"" + c.name + "\""
    })
    tmpDf = tmpDf.toDF(cols: _*)
    printDFSchema(tmpDf, "after")

    val conf = new Configuration()
    conf.setBoolean("phoenix.schema.isNamespaceMappingEnabled", true)
    conf.setBoolean("phoenix.schema.mapSystemTablesToNamespace", true)

    if (writeMode == "bulkLoad") {
      // Dump to CSV in the staging dir, then hand off to the MR bulk-load script.
      tmpDf.write.csv(s"$hfileDataPath")
      try {
        conf.set(HConstants.ZOOKEEPER_QUORUM, HBASE_ZOOKEEPER_QUORUM)
        conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, ZOOKEEPER_ZNODE_PARENT)
        conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181")
        val indexTableName = getIndexTableName()
        log.info("schema={}, table={}, input={}, index_table={}, shellZkUrl={}, shouldProxy={}, proxyUser={}, resourceQueue={}", bulkLoadSchemaName, bulkLoadTableName, hfileDataPath, indexTableName, shellZkUrl, shouldProxy, proxyUser, resourceQueue)
        execShell(indexTableName)
      } catch {
        case exception: Exception => throw exception
      }
    } else {
      tmpDf.write
        .format(FORMAT)
        .mode(SaveMode.Overwrite)
        .option("table", jdbcTableName)
        .option("driver", "org.apache.phoenix.jdbc.PhoenixDriver")
        .option("zkUrl", zkUrl) // ip,ip,ip:port:/hbase-secure
        .save()
    }

    phoenixDataStatusInfo.setDataNum(accumulator.value.toString())
    // Report the data status
    log.info("==> phoenix doWrite: " + accumulator.value.toString)

    DataframeUtils.reportDataStatusRelation(kc, phoenixDataStatusInfo,
      nameSpace,
      phoenixConfig.table_name,
      phoenixConfig.extender.meta.clazz,
      compact(render(phoenixConfig.extender.meta.params)))
    // Report the metric to the operation center
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(phoenixDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)
  }

  /**
    * Queries SYSTEM.CATALOG for secondary-index tables of the target table.
    *
    * @return the (quoted) name of the last index found, or "" when none exists
    */
  def getIndexTableName(): String = {
    import java.sql.DriverManager
    val con = DriverManager.getConnection(zkUrl)
    // FIX: the original leaked the connection and closed the statement only on
    // the happy path; both are now released in finally blocks.
    try {
      val sql = s"select COLUMN_FAMILY as INDEX_NAME  from SYSTEM.CATALOG where TABLE_SCHEM = '$nameSpace' and TABLE_NAME= '$tableName' and TABLE_TYPE='i'"
      log.info("get index sql => " + sql)
      val statement = con.prepareStatement(sql)
      try {
        val rset = statement.executeQuery
        var indexTable = ""
        while (rset.next()) {
          indexTable = "\"" + rset.getString("INDEX_NAME") + "\""
          logInfo("indexTable=> " + indexTable)
        }
        indexTable
      } finally {
        statement.close()
      }
    } finally {
      con.close()
    }
  }

  /**
    * Runs the MR bulk-load shell script and fails the job (after cleaning the
    * staging dir) when the script exits non-zero.
    *
    * Example: mrBulkLoad.sh "schema" "table" inpath "index" zk:2181:/hbase-secure ...
    *
    * @throws Exception when the script exits with a non-zero code
    */
  def execShell(indexTableName: String) = {
    val p = new ProcessBuilder("/bin/bash", "mrBulkLoad.sh", bulkLoadSchemaName, bulkLoadTableName, hfileDataPath, indexTableName, shellZkUrl, shouldProxy, proxyUser, resourceQueue)
    val p2 = p.start()
    val br1 = new BufferedReader(new InputStreamReader(p2.getInputStream()))
    val br2 = new BufferedReader(new InputStreamReader(p2.getErrorStream()))
    // FIX: drain both output streams BEFORE waitFor(). Waiting first can
    // deadlock when the child fills its stdout/stderr pipe buffers. Readers
    // are also closed now.
    try {
      var line1: String = br1.readLine()
      while (line1 != null) {
        logInfo("getInputStream => " + line1)
        line1 = br1.readLine()
      }
      var line2: String = br2.readLine()
      while (line2 != null) {
        logInfo("getErrorStream => " + line2)
        line2 = br2.readLine()
      }
    } finally {
      br1.close()
      br2.close()
    }
    val exitCode = p2.waitFor()

    if (exitCode == 0) {
      logInfo("bulkLoad successfully.")
    } else {
      logWarning("bulkLoad failed!")
      deleteHdfsPath(hfileDataPath)
      throw new Exception("bulkLoad failed!")
    }
  }

  /** Logs every field name and type of the schema, prefixed for readability. */
  def printDFSchema(df: DataFrame, prefix: String): Unit = {
    df.schema.foreach(f => {
      log.info(s"=> $prefix: " + f.name + ", " + f.dataType)
    })
  }

  /** Recursively deletes the given path on the default HDFS, if it exists. */
  def deleteHdfsPath(filePath: String): Unit = {
    val path = new Path(filePath)
    val hadoopConf = sparkSession.sparkContext.hadoopConfiguration
    val hdfs = org.apache.hadoop.fs.FileSystem.get(hadoopConf)
    if (hdfs.exists(path)) {
      hdfs.delete(path, true)
      logInfo(s"delete $filePath successfully.")
    }
  }


  /**
    * Casts every DATETIME column of the sink schema to StringType.
    *
    * @param sinkSchema column metadata describing the sink fields
    * @param data       the DataFrame to convert
    * @return the DataFrame with DATETIME columns cast to string
    */
  def convertTimestamp(sinkSchema: List[ExtractFieldInfo], data: DataFrame): DataFrame = {
    var convertData = data
    // FIX: iterate the sinkSchema parameter. The original silently ignored it
    // and read the columnInfoMetaList field instead — presumably the same list
    // after init(), but callers passing a different schema were ignored.
    for (entity <- sinkSchema) {
      val colName: String = entity.field
      val colType: String = entity.data_type
      colType match {
        case ColumnType.DATETIME => convertData = convertData.withColumn(colName, data.col(colName).cast(StringType))
        case _ => // no conversion needed
      }
    }

    convertData
  }


}
