package com.kingsoft.dc.khaos.module.spark.sink


import java.util.Properties
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.ds.PGSqlConnect
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.metadata.sink.PGSqlSinkConfig
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.request.model.JdbcConnectEntity
import com.kingsoft.dc.khaos.module.spark.util.{CenterMetricUtils, DataframeUtils, MetaUtils, TableSplitUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.types.TimestampType
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.json4s.{DefaultFormats, JsonAST}
import org.json4s.jackson.JsonMethods.{compact, render}

import scala.collection.mutable


/**
 * create by wuxiang on 20/12/18
 */
/**
 * PostgreSQL sink: writes a DataFrame into one or more physical PostgreSQL
 * tables over JDBC, then reports row counts, data status and sync metrics
 * to the ops center.
 *
 * create by wuxiang on 20/12/18
 */
class PGSqlSink extends SinkStrategy with Logging {
  private var connectEntity: JdbcConnectEntity = _
  private var _pgsql_Meta: MetaDataEntity = _
  private var _pgsql_Config: PGSqlSinkConfig = _
  private var kc: KhaosContext = _
  private var host: String = ""
  private var port: String = ""
  private var username: String = ""
  private var password: String = ""
  private var instanceName: String = ""
  // Quoted ("...") database / table identifiers, populated in sink().
  private var dbName = ""
  private var tblName = ""
  private var tblNameAndDF: mutable.HashMap[String, DataFrame] = _
  // Supported write dispositions; any other configured value fails fast
  // with a NoSuchElementException when looked up.
  private val writeModeMap = Map(
    "append" -> SaveMode.Append,
    "overwrite" -> SaveMode.Overwrite)

  /**
   * Data output entry point: parse the sink config, initialize connection
   * metadata, then write the DataFrame to the target table(s).
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JsonAST.JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {

    implicit val formats: DefaultFormats.type = DefaultFormats
    this._pgsql_Config = config.extract[PGSqlSinkConfig]
    this.kc = kc
    // Quote database and table names so mixed-case / reserved identifiers
    // survive PostgreSQL's case folding.
    dbName = s"""\"${_pgsql_Config.db_name}\""""
    tblName = s"""\"${_pgsql_Config.table_name}\""""
    init(kc)
    writeRealTable(dataFrame)
    this
  }

  /**
   * Resolve the physical (possibly split) target tables, write each slice of
   * the DataFrame, accumulate the written row count, and report data status
   * plus ops-center metrics.
   *
   * @param data source DataFrame to persist
   */
  def writeRealTable(data: DataFrame): Unit = {
    val pgsqlDataStatusInfo = new RelationDataStatusInfo
    // Fill configured default values for missing / null columns.
    val sdvData = DataframeUtils.setDefaultValue(_pgsql_Config.extract_fields, _pgsql_Meta.columnEntiy, data)
    val tuple = DataframeUtils.buildNewDataframePGSql(_pgsql_Config.table_name,
      _pgsql_Config.extract_fields,
      sdvData,
      connectEntity)
    val targetData = tuple._1
    log.info("===> " + targetData.schema.fields.mkString(","))

    tblNameAndDF = TableSplitUtils.getSinkRealTable(kc, dbName, tblName, this, _pgsql_Config.extender.meta.clazz,
      compact(render(_pgsql_Config.extender.meta.params)), targetData, null)

    // Loop-invariant: the target partition count depends only on the source
    // DataFrame and the column metadata, so compute it once, not per table.
    val numPartition = DataframeUtils.rePartitions(kc, data, _pgsql_Meta.columnEntiy)

    // Write every (physical table name -> DataFrame slice) pair and keep a
    // running total of the rows written across all slices.
    for ((realTable, sliceDF) <- tblNameAndDF) {
      val (resultData, accumulator) = DataframeUtils.calculateDataNum(kc, sliceDF, "PGSqlSink")
      write2PGSql(kc, realTable, resultData, numPartition)

      val writtenSoFar = Option(pgsqlDataStatusInfo.getDataNum).map(_.toLong).getOrElse(0L)
      pgsqlDataStatusInfo.setDataNum((writtenSoFar + accumulator.value.toLong).toString)
    }
    // Report data status.
    // BUG FIX: the previous code compared a SaveMode value against the string
    // "overwrite", which can never be equal, so `cover` was always false.
    pgsqlDataStatusInfo.setCover(
      writeModeMap(_pgsql_Config.write_option.trim.toLowerCase) == SaveMode.Overwrite)
    DataframeUtils.reportDataStatusRelation(kc,
      pgsqlDataStatusInfo,
      _pgsql_Config.db_name,
      _pgsql_Config.table_name,
      _pgsql_Config.extender.meta.clazz,
      compact(render(_pgsql_Config.extender.meta.params)))
    // Report sync metrics to the ops center.
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(pgsqlDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)
  }

  /** Initialize: verify access rights, then set up the PGSql connection metadata. */
  def init(kc: KhaosContext): Unit = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    // Permission check.
    checkReadRight(kc)

    // Initialize pgsql connection metadata.
    initPGSqlConnect(kc)
  }


  /**
   * Check whether the job has read authorization on the configured table.
   *
   * @param kc KhaosContext
   * @throws Exception when the auth check fails
   */
  def checkReadRight(kc: KhaosContext): Unit = {
    val checkResult: Boolean = MetaUtils.checkReadAuth(kc,
      _pgsql_Config.db_name,
      _pgsql_Config.table_name,
      _pgsql_Config.extender.auth.clazz,
      compact(render(_pgsql_Config.extender.auth.params)))
    if (!checkResult) {
      log.error(s"pgsql reader init failed, 权限验证未通过!")
      throw new Exception(s"pgsql reader init failed, 权限验证未通过!")
    }
  }


  /**
   * Fetch table metadata, extract host/port/credentials from it, and build
   * the JDBC connect entity used by the writers.
   */
  def initPGSqlConnect(kc: KhaosContext): Unit = {
    _pgsql_Meta = MetaUtils.getPGSqlMeta(kc,
      _pgsql_Config.db_name,
      _pgsql_Config.table_name,
      _pgsql_Config.extender.meta.clazz,
      compact(render(_pgsql_Config.extender.meta.params)),
      this)

    val connect: PGSqlConnect = _pgsql_Meta.dsPGSqlConnect
    instanceName = connect.getInstanceName
    host = connect.getHost
    port = connect.getPort
    username = connect.getUserName
    password = connect.getPassWord

    this.connectEntity = new JdbcConnectEntity(getConnectUrl(),
      username,
      password,
      _pgsql_Config.db_name,
      _pgsql_Config.table_name)
  }

  /**
   * Write a DataFrame into a single physical PostgreSQL table over JDBC.
   *
   * @param kc            KhaosContext
   * @param tableName     quoted physical table name
   * @param data          DataFrame to persist
   * @param numPartitions desired partition count for the JDBC write
   */
  def write2PGSql(kc: KhaosContext, tableName: String, data: DataFrame, numPartitions: Int): Unit = {
    val timeNoNullDataFrame = processingTimeType(data)
    val partitions = data.rdd.partitions.length
    log.info("Start Writing PGSql Database!")
    val dbTable = s"${dbName}.${tableName}"
    val prop = new Properties()
    prop.put("driver", "org.postgresql.Driver")
    prop.put("user", username)
    prop.put("password", password)
    // Both branches previously duplicated the identical write call; only the
    // optional coalesce differs. NOTE(review): coalesce cannot INCREASE the
    // partition count — if growing was intended, repartition is needed; kept
    // as-is for behavior parity.
    val output =
      if (partitions >= numPartitions) timeNoNullDataFrame
      else timeNoNullDataFrame.coalesce(numPartitions)
    output
      .write
      .mode(writeModeMap(_pgsql_Config.write_option.trim.toLowerCase))
      .jdbc(getConnectUrl(), dbTable.trim, prop)
  }

  /**
   * Cast every configured TIME field to Spark TimestampType.
   * TIME values are read into the DataFrame as String, so they must be cast
   * before the JDBC write.
   *
   * @param data DataFrame
   * @return DataFrame with TIME columns cast to TimestampType
   */
  def processingTimeType(data: DataFrame): DataFrame =
    _pgsql_Config.extract_fields.foldLeft(data) { (df, elem) =>
      if (elem.data_type.equalsIgnoreCase("TIME"))
        // NOTE(review): `.as(...)` aliases the whole Dataset, not the column;
        // kept for behavior parity but it likely has no effect on the write.
        df.withColumn(elem.field, df.col(elem.field).cast(TimestampType)).as(elem.field)
      else
        df
    }


  /**
   * Build the JDBC connection URL from host/port/instance.
   *
   * @return url
   */
  def getConnectUrl(): String = {
    val url = s"jdbc:postgresql://${host}:${port}/${instanceName}"
    log.info("url ==> " + url)
    url
  }


}
