package com.kingsoft.dc.khaos.module.spark.sink

import java.io.{FileWriter, IOException}
import java.util
import java.util.Properties

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.SparkFiles
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.apache.spark.util.LongAccumulator
import org.json4s.jackson.JsonMethods.{compact, render}
import org.json4s.{DefaultFormats, JsonAST}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.extender.meta.model.ds.KafkaConnect
import com.kingsoft.dc.khaos.extender.meta.model.table.DmTable
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.KafkaConstants
import com.kingsoft.dc.khaos.module.spark.metadata.sink.{KafkaAdvancedOptions, KafkaSinkConfig}
import com.kingsoft.dc.khaos.module.spark.model.RelationDataStatusInfo
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.util.{CenterMetricUtils, DataframeUtils, FileUtils, MetaUtils}
import com.kingsoft.dc.khaos.util.Logging

/**
 * create by yansu on 2020/08/27 11:13
 */
class KafkaSink extends SinkStrategy with Logging with Serializable {

  private val kafkaDataStatusInfo = new RelationDataStatusInfo
  private var _kafkaConfig: KafkaSinkConfig = null
  private var kafkaConnect: KafkaConnect = null
  private var _columnEntiy: util.List[DmTableColumn] = null
  private var tableEntiy: DmTable = null
  private var moduleId = ""
  private var principal = ""
  private var keytab = ""
  private var krb5Conf = ""
  private var defaultDs = true

  private var _common_public_kde_realm = "HADOOP.COM"
  // topic分区数
  private var topicPartitionSize: Int = 0
  // topic数据格式
  private var topicType: String = "csv"
  // csv格式数据分割符
  private var topicChar: String = ","

  /** 数据输出 */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JsonAST.JObject,
                    schema: Schema,
                    dataSet: DataFrame): Any = {

    implicit val formats = DefaultFormats
    val kafkaConfig = config.extract[KafkaSinkConfig]
    this._kafkaConfig = kafkaConfig
    this.moduleId = module_id

    MetaUtils.checkWriteAuth(kc,
      kafkaConfig.db_name,
      kafkaConfig.table_name,
      kafkaConfig.extender.auth.clazz,
      compact(render(kafkaConfig.extender.auth.params)))
    val properties = init(kc)
    kafkaProducer(kc, dataSet, properties)
  }

  def reStructDataFrame(data: DataFrame) = {
    var res = DataframeUtils.setDefaultValue(
      _kafkaConfig.extract_fields,
      _columnEntiy,
      data)
    res = DataframeUtils.convertDataType(_kafkaConfig.extract_fields, res)
    res
  }

  /**
   * 轮询方式写入kafka
   *
   * @param data
   * @param prop
   */
  def roundWrite(kc: KhaosContext, data: DataFrame, prop: Properties): Unit = {
    val accumulator: LongAccumulator = kc.sparkSession.sparkContext.longAccumulator("DataNumber" + "_" + "KafkaSink" + "_" + Random.nextInt(1000))
    topicType match {
      case "csv" => {
        data.foreachPartition(partition => {
          if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {
            initJAASConf()
          }
          val kafkaProducer = new KafkaProducer[String, String](prop)
          partition.foreach(row => {
            val record = new ProducerRecord[String, String](_kafkaConfig.table_name.toLowerCase, row.mkString(topicChar))
            kafkaProducer.send(record)
            accumulator.add(1)
          })
          kafkaProducer.close()
        })
      }
      case "json" => {
        import org.apache.spark.sql.functions._
        val json_column_name = "json_column" + System.currentTimeMillis()
        val array = new ArrayBuffer[Column]()
        for (elem <- _kafkaConfig.extract_fields) {
          array += col(elem.field)
        }

        val kv = new util.HashMap[String, String]()
        kv.put("timestampFormat", "yyyy-MM-dd HH:mm:ss")
        data
          .withColumn(json_column_name, to_json(struct(array: _*), kv).cast(StringType))
          .foreachPartition(partition => {
            if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {
              initJAASConf()
            }
            val kafkaProducer = new KafkaProducer[String, String](prop)
            partition.foreach(row => {
              val record = new ProducerRecord[String, String](_kafkaConfig.table_name.toLowerCase, row.getAs[String](json_column_name))
              accumulator.add(1)
              kafkaProducer.send(record)
            })
            kafkaProducer.close()
          })
      }
      case _ => {
        throw new IllegalArgumentException("暂不支持写入该格式数据!")
      }
    }
    kafkaDataStatusInfo.setDataNum(accumulator.value.toString())
    DataframeUtils.reportDataStatusRelation(kc, kafkaDataStatusInfo,
      _kafkaConfig.db_name,
      _kafkaConfig.table_name,
      _kafkaConfig.extender.meta.clazz,
      compact(render(_kafkaConfig.extender.meta.params)))
    //上报运维中心指标
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(kafkaDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)
  }

  /*
  hash方式写入kafka
   */
  def hashWrite(kc: KhaosContext, data: DataFrame, prop: Properties): Unit = {
    val accumulator: LongAccumulator = kc.sparkSession.sparkContext.longAccumulator("DataNumber" + "_" + "KafkaSink" + "_" + Random.nextInt(1000))
    topicType match {
      case "csv" => {
        data.foreachPartition(partition => {
          if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {
            initJAASConf()
          }
          val kafkaProducer = new KafkaProducer[String, String](prop)
          partition.foreach(row => {
            val fieldIndex = row.fieldIndex(_kafkaConfig.hash_column)
            val hash_value = row.get(fieldIndex)
            if (null == hash_value) {
              val record = new ProducerRecord[String, String](_kafkaConfig.table_name.toLowerCase, null, row.mkString(topicChar))
              kafkaProducer.send(record)
              accumulator.add(1)
            } else {
              val record = new ProducerRecord[String, String](_kafkaConfig.table_name.toLowerCase, hash_value.toString, row.mkString(topicChar))
              kafkaProducer.send(record)
              accumulator.add(1)
            }
          })
          kafkaProducer.close()
        })
      }
      case "json" => {
        import org.apache.spark.sql.functions._
        val json_column_name = "json_column" + System.currentTimeMillis()
        val array = new ArrayBuffer[Column]()
        for (elem <- _kafkaConfig.extract_fields) {
          array += col(elem.field)
        }
        //        val columArr = data.columns
        val kv = new util.HashMap[String, String]()
        kv.put("timestampFormat", "yyyy-MM-dd HH:mm:ss")
        data
          .withColumn(json_column_name, to_json(struct(array: _*), kv).cast(StringType))
          .foreachPartition(partition => {
            if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {
              initJAASConf()
            }
            val kafkaProducer = new KafkaProducer[String, String](prop)
            partition.foreach(row => {
              val fieldIndex = row.fieldIndex(_kafkaConfig.hash_column)
              val hash_value = row.get(fieldIndex)
              //              val json = new JSONObject()
              //              for (elem <- columArr) {
              //                println("column name: " + elem)
              //                json.put(elem, row.getAs(elem))
              //              }
              //              println("json==> " + json.toJSONString)
              if (null == hash_value) {
                val record = new ProducerRecord[String, String](_kafkaConfig.table_name.toLowerCase, null, row.getAs[String](json_column_name))
                kafkaProducer.send(record)
                accumulator.add(1)
              } else {
                val record = new ProducerRecord[String, String](_kafkaConfig.table_name.toLowerCase, hash_value.toString, row.getAs[String](json_column_name))
                kafkaProducer.send(record)
                accumulator.add(1)
              }
            })
            kafkaProducer.close()
          })
      }
      case _ => {
        throw new IllegalArgumentException("暂不支持写入该格式数据!")
      }
    }
    kafkaDataStatusInfo.setDataNum(accumulator.value.toString())
    DataframeUtils.reportDataStatusRelation(kc, kafkaDataStatusInfo,
      _kafkaConfig.db_name,
      _kafkaConfig.table_name,
      _kafkaConfig.extender.meta.clazz,
      compact(render(_kafkaConfig.extender.meta.params)))
    //上报运维中心指标
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(kafkaDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)
  }

  def kafkaProducer(kc: KhaosContext, data: DataFrame, prop: Properties): Unit = {
    var structDf = reStructDataFrame(data)
    implicit val formats = DefaultFormats
    var rePartitions = 0
    val advanced = _kafkaConfig.advanced_options.extract[KafkaAdvancedOptions]
    if (advanced.parallelism.on_off) {
      rePartitions = advanced.parallelism.write_parall
    }
    if (rePartitions != 0 && null != rePartitions) {
      structDf = structDf.repartition(rePartitions)
    }
    val nums = structDf.rdd.partitions.length
    log.info("生产者个数为: " + nums)
    val write_mode = _kafkaConfig.product_mode
    write_mode match {
      case "round" => {
        log.info("开始轮询写入TOPIC")
        roundWrite(kc, structDf, prop)
      }
      case "hash" => {
        log.info("开始哈希写入TOPIC")
        hashWrite(kc, structDf, prop)
      }
      case _ => {
        throw new Exception("暂不支持该写入方式!")
      }
    }
  }

  def init(kc: KhaosContext) = {
    initKafkaConf(kc)
  }

  /**
   * 初始化kafka配置
   *
   * @return Properties
   */
  def initKafkaConf(kc: KhaosContext): Properties = {
    loadProperties(kc)
    val hosts = getKafkaConnection(kc)
    val props = new Properties

    if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {

      principal = kc.conf.getString("proxy.user")
      //内部kerberos
      if (defaultDs) {
        log.info("kafka 开启kerberos认证")
        keytab = principal + ".keytab"
        val keyTabAllPath = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + keytab
        log.info("keyTabAllPath===>" + keyTabAllPath)
        kc.sparkSession.sparkContext.addFile(keyTabAllPath)
        System.setProperty(KafkaConstants.JAVA_SECURITY_KRB5_CONF, kc.conf.getString("proxy.krb5.conf"))
        //外部kerberos
      } else {
        log.info("kafka 开启外部kerberos认证")
        val keytabFile = kafkaConnect.getKeytabFile
        val krb5File = kafkaConnect.getKrb5File

        keytab = System.getenv("SPARK_YARN_STAGING_DIR") + "/kafka_sink_" + moduleId + ".keytab"
        krb5Conf = System.getenv("SPARK_YARN_STAGING_DIR") + "/kafka_sink_krb5_" + moduleId + ".conf"
        log.info("keyTabPath===>" + keytab)
        log.info("krb5Path===>" + krb5Conf)
        FileUtils.decoderBase64File(keytabFile, keytab, FileSystem.newInstance(new Configuration()))
        FileUtils.decoderBase64File(krb5File, krb5Conf, FileSystem.newInstance(new Configuration()))
        kc.sparkSession.sparkContext.addFile(keytab)
        System.setProperty(KafkaConstants.JAVA_SECURITY_KRB5_CONF, krb5Conf)
      }


      props.put(KafkaConstants.SECURITY_PROTOCOL, "SASL_PLAINTEXT")
      props.put(KafkaConstants.SASL_MECHANISM, "GSSAPI")
      props.put(KafkaConstants.SASL_KERBEROS_SERVICE_NAME, "kafka")

    }

    props.put(KafkaConstants.BOOTSTRAP_SERVER, hosts)
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    _kafkaConfig.acks match {
      case "0" => {
        props.put(KafkaConstants.ACKS, "0")
      }
      case "1" => {
        props.put(KafkaConstants.ACKS, "1")
      }
      case "2" => {
        props.put(KafkaConstants.ACKS, "all")
      }
    }
    props
  }


  def initJAASConf(): String = {
    var keytabPath = ""
    var principalRealm = ""
    if (defaultDs) {
      keytabPath = SparkFiles.get(keytab)
      principalRealm = principal + "@" + _common_public_kde_realm
    } else {
      keytabPath = SparkFiles.get(keytab.split("/").last)
      principalRealm = kafkaConnect.getPrincipal
    }
    log.info("keytabPath===>" + keytabPath)
    log.info("principalRealm===>" + principalRealm)
    val jaasPath = System.getenv("PWD") + "/kafka_sink_jaas.conf"
    log.info("jaasPath===>" + jaasPath)

    val jaasconf =
      s"""|KafkaClient {
          |  com.sun.security.auth.module.Krb5LoginModule required
          |  useTicketCache=false
          |  useKeyTab=true
          |  principal="$principalRealm"
          |  serviceName="kafka"
          |  keyTab="$keytabPath";
          |};
          |""".stripMargin
    log.info("jaasconf===>" + jaasconf)

    var writer: FileWriter = null
    try {
      writer = new FileWriter(jaasPath)
      writer.write(jaasconf)
      writer.flush()
    } catch {
      case e: IOException => {
        log.error(e.getMessage, e)
        throw new IOException(e.getMessage, e)
      }
    } finally {
      if (writer != null) writer.close()
    }
    System.setProperty(KafkaConstants.JAVA_SECURITY_AUTH_LOGIN_CONFIG, jaasPath)
  }


  /*
初始化配置文件
*/
  def loadProperties(kc: KhaosContext): Unit = {
    try {
      val kafkaProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.kafka.source.").toMap
      log.info("KafkaSink Properties")
      kafkaProperties.foreach { case (k, v) => log.info(k + "   " + v) }
      _common_public_kde_realm = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_COMMON_PUBLIC_KDE_REALM, "HADOOP.COM")
    } catch {
      case e: Exception =>
        log.error("未读取到Kafka配置! 改用默认配置")
    }
  }

  /**
   * 获取kafka集群物理地址
   *
   * @return kafka host
   */
  def getKafkaConnection(kc: KhaosContext) = {
    val className = _kafkaConfig.extender.meta.clazz
    val db_name = _kafkaConfig.db_name
    val topic_name = _kafkaConfig.table_name
    val kafkaMeta = MetaUtils.getKafkaMeta(kc,
      db_name,
      topic_name,
      className,
      compact(render(_kafkaConfig.extender.meta.params)),
      this)
    kafkaConnect = kafkaMeta.getDsKafkaConnect
    _columnEntiy = kafkaMeta.getColumnEntiy
    tableEntiy = kafkaMeta.getTableEntiy
    import scala.collection.JavaConverters._
    tableEntiy.getParams.asScala.foreach(map => {
      map.get("pKey") match {
        case KafkaConstants.DATA_FORMAT => topicType = map.get("pValue").toString
        case KafkaConstants.PARTITION_SIZE => topicPartitionSize = map.get("pValue").toString.toInt
        case KafkaConstants.CSV_DELIMITER => topicChar = map.get("pValue").toString
        case _ => {}
      }
    })

    defaultDs = kafkaMeta.getDefaultDs
    kafkaConnect.getServerAddress
  }
}
