package com.kingsoft.dc.khaos.module.spark.sink

import java.util

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MetaDataConstants, MppConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.MppSinkConfig
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.cos.CosAccessConfig
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.request.model.{JdbcConnectEntity, StructFieldEntity}
import com.kingsoft.dc.khaos.module.spark.util.CosApiUtils.{createBucketIfNotExist, getCosAccessAkSk}
import com.kingsoft.dc.khaos.module.spark.util.TableSplitUtils.StrategyValueEnum
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame}
import org.json4s.jackson.JsonMethods.{compact, render}
import org.json4s.{DefaultFormats, JsonAST}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * Created by yansu on 2019/06/22 11:17
 */
/**
 * Sink that writes a Spark DataFrame into an MPP database.
 *
 * Depending on the source mode resolved from table metadata it either:
 *  - stages the data on COS as ORC files and loads them through an
 *    OSS-backed external table (hashdata / KDW-for-GreenPlum), or
 *  - delegates to [[GreenPlumSink]] / [[HAWQSink]] for direct JDBC writes.
 */
class MppSink extends SinkStrategy with Logging {
  // JDBC connection descriptor for the target table (built in initJdbcConnectInfo)
  private var connectEntity: JdbcConnectEntity = null
  private var _kc: KhaosContext = null
  private var _mpp_Config: MppSinkConfig = null
  // Column metadata of the target MPP table
  private var _mppColsInfo: util.List[DmTableColumn] = null

  // Physical address / credentials of the MPP database (resolved from metadata)
  private var _host: String = null
  private var _port: String = null
  private var _username: String = null
  private var _password: String = null
  private var _instansename: String = null
  // Quoted database / table names (filled in sink())
  private var dbName = ""
  private var tblName = ""

  // Defaults overridden by the module configuration file (see loadProperties)
  private var _loginTimeout: Int = 6000
  private var _region = "ap-beijing"
  private var _bucket = "1"
  private var _endpoint = "cos.ap-beijing.myqcloud.com"
  private var _delimeter = ";"

  // Physical table name -> DataFrame to write into it
  private var tblNameAndDF: mutable.HashMap[String, DataFrame] = null
  // COS connection settings
  private var cosConfig: CosAccessConfig = new CosAccessConfig
  private var cosFs: FileSystem = null
  // Temporary COS directory used to stage data before loading into MPP
  private val COS_WORK_DIR = "/di/.working_input"
  private val OSS_TYPE = "cos"
  // Row-count status accumulated across all sub-tables, reported after the load
  private var mppDataStatusInfo: RelationDataStatusInfo = null
  //  External-table column types supported for each ORC data type:
  //  Type Mapping Supported
  //  External Data Type  |  Orc Data Type
  //  --------------------------------------------------------------------
  //  bool                |  boolean, bigint, int, smallint, tinyint, date
  //  int,int2,int4,int8  |  boolean, bigint, int, smallint, tinyint, date
  //  date                |  boolean, bigint, int, smallint, tinyint, date
  //  float4,float8       |  float,   double
  //  interval            |  string,  varchar
  //  time                |  string,  varchar
  //  timestamp           |  timestamp
  //  numeric             |  decimal
  //  char(n),varchar(n)  |  binary,  char, varchar, string
  //  bytea,text          |  binary,  char, varchar, string

  private val ORC_FORMAT = "orc"
  // NOTE(review): CSV_FORMAT is currently unused; kept for parity with ORC_FORMAT
  private val CSV_FORMAT = "csv"
  private val DELIMETER = "|"
  // Split-table descriptor; null when the target table is not split
  private var tableSplit: DmTableSplit = null
  // Used to make the external-table flag unique per run
  private val currtime = System.currentTimeMillis()
  private var _dataSourceType: String = ""
  // Field metadata produced while building the target DataFrame
  private var mppMeta: mutable.HashMap[String, StructFieldEntity] = null

  /** Supported MPP data source modes. */
  object SourceMode {
    val HashData = "hashdata" // mpp
    val GreenPlum = "greenplum"
    val HAWQ = "hawq"
  }

  /**
   * Writes the dataset to the configured MPP target, dispatching on the
   * source mode resolved from table metadata.
   *
   * @param kc        khaos runtime context
   * @param module_id id of the current module instance
   * @param config    raw sink configuration, extracted into [[MppSinkConfig]]
   * @param schema    upstream schema description
   * @param dataSet   data to be written
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JsonAST.JObject,
                    schema: Schema,
                    dataSet: DataFrame): Any = {

    // Parse the sink configuration
    implicit val formats: DefaultFormats.type = DefaultFormats
    val mppSinkConfig = config.extract[MppSinkConfig]
    this._mpp_Config = mppSinkConfig

    // Quote database and table names for use in SQL
    dbName = s"""\"${_mpp_Config.db_name}\""""
    tblName = s"""\"${_mpp_Config.table_name}\""""

    // Verify write permission on the target table
    MetaUtils.checkWriteAuth(kc,
      _mpp_Config.db_name,
      _mpp_Config.table_name,
      mppSinkConfig.extender.auth.clazz,
      compact(render(mppSinkConfig.extender.auth.params)))

    this._kc = kc
    // Initialize metadata, COS and JDBC configuration
    init()
    log.info("SourceMode ==> " + _dataSourceType)
    _dataSourceType.toLowerCase match {
      case SourceMode.HashData =>
        log.info("Mpp KDW_for_GreenPlum DataSource Synchronization!")
        // Staging directory, e.g. /di/.working_input/1820/*
        val workDir = s"${COS_WORK_DIR}/${_kc.conf.getString("job.inst.id")}"
        try {
          loadToCosAndMpp(dataSet, workDir)
        } catch {
          case e: Exception =>
            // Log with the full stack trace (instead of printStackTrace), then rethrow
            log.error("mpp sink failed", e)
            throw e
        } finally {
          // Always release the COS file system handle
          if (cosFs != null) {
            log.info("关闭文件系统!")
            cosFs.close()
          }
        }
      case SourceMode.GreenPlum =>
        log.info("greenplum datasource synchronization!")
        new GreenPlumSink().sink(kc, module_id, config, schema, dataSet, buildDataSourceConfig(), _mppColsInfo)
      case SourceMode.HAWQ =>
        log.info("KDW_for_HAWQ datasource synchronization!")
        new HAWQSink().sink(kc, module_id, config, schema, dataSet, buildDataSourceConfig(), _mppColsInfo)
      case _ => throw new IllegalArgumentException(s"Unknown Data Source: ${_dataSourceType}!")
    }
  }

  /** Builds the connection-parameter map shared by the GreenPlum and HAWQ sinks. */
  private def buildDataSourceConfig(): Map[String, String] = Map(
    "host" -> _host,
    "port" -> _port,
    "username" -> _username,
    "password" -> _password,
    "instansename" -> _instansename)

  /**
   * Initializes metadata for all modes; COS and JDBC settings are only
   * needed for the hashdata (COS-staged) mode.
   */
  def init(): Unit = {
    initMetaData()
    if (_dataSourceType.equalsIgnoreCase(SourceMode.HashData)) {
      initCosConfig()
      initJdbcConnectInfo()
    }
  }

  /**
   * Loads metadata for the target table: physical address, credentials,
   * column info and source mode. Rejects view-type targets.
   */
  def initMetaData(): Unit = {

    log.info("初始化物理地址...")
    val dbname = _mpp_Config.db_name
    val tblname = _mpp_Config.table_name
    val className = _mpp_Config.extender.meta.clazz
    // Local renamed from `mppMeta` to avoid shadowing the field of the same name
    val metaEntity: MetaDataEntity = MetaUtils.getMPPMeta(_kc, dbname, tblname, className, compact(render(_mpp_Config.extender.auth.params)), this)
    val mppConnect = metaEntity.getDsMppConnect
    _mppColsInfo = metaEntity.getColumnEntiy
    _host = mppConnect.getHost
    _port = mppConnect.getPort
    _username = mppConnect.getUsername
    _password = mppConnect.getPassword
    _instansename = mppConnect.getInstanceName
    _dataSourceType = mppConnect.getSourceMode

    // Writing into views is not supported
    if (metaEntity.tableEntiy.getTblType.equalsIgnoreCase(MetaDataConstants.VIEW)) {
      throw new Exception(s"暂不支持写入视图类型表 ${dbname}.${tblname}")
    }
  }

  /**
   * Initializes the COS connection used for intermediate data staging and
   * opens the corresponding Hadoop file system.
   */
  def initCosConfig(): Unit = {
    log.info("初始化COS文件系统...")
    loadProperties(_kc)
    val cosAccessConfig = getCosAccessAkSk(_kc)
    cosAccessConfig.setRegion(_region)
    cosAccessConfig.setEndPoint(_endpoint)
    // Bucket naming convention: <bucket>-<appId>
    cosAccessConfig.setBucket(_bucket + "-" + cosAccessConfig.getAppId)
    cosAccessConfig.setDelimeter(_delimeter)
    cosConfig = cosAccessConfig
    log.info("开始加载COS文件系统...")
    createBucketIfNotExist(cosConfig)
    cosFs = FileSystem.get(addCosFileSystem())
  }

  /**
   * Loads MPP sink settings from the module configuration; falls back to
   * the field defaults when the configuration cannot be read.
   *
   * @param kc khaos runtime context providing the configuration
   */
  def loadProperties(kc: KhaosContext): Unit = {
    try {
      val mppProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.mpp.sink.").toMap
      log.info("MPPSink Properties")
      mppProperties.foreach { case (k, v) => log.info(k + "   " + v) }
      _loginTimeout = mppProperties.getOrElse(MppConstants.MODULE_MPP_SINK_JDBC_CONNECT_TIMEOUT, MppConstants.DEFAULT_CONNECT_TIMEOUT).toInt

      _bucket = mppProperties.getOrElse(MppConstants.MODULE_MPP_SINK_TMP_COS_BUCKET, "none")
      _endpoint = mppProperties.getOrElse(MppConstants.MODULE_MPP_SINK_TMP_COS_ENDPOINT, "none")
      _region = mppProperties.getOrElse(MppConstants.MODULE_MPP_SINK_TMP_COS_REGION, "none")
      _delimeter = mppProperties.getOrElse(MppConstants.MODULE_MPP_SINK_TMP_COS_DELIMETER, "none")

    } catch {
      case e: Exception =>
        // Log the cause instead of silently discarding it; defaults remain in effect
        log.error("未读取到MPP配置! 改用默认配置", e)
    }
  }


  /**
   * Builds the JDBC connection descriptor for the target table.
   */
  def initJdbcConnectInfo(): Unit = {
    log.info("初始化JDBC...")
    this.connectEntity = new JdbcConnectEntity(getConnectUrl(),
      _username,
      _password,
      dbName,
      tblName)
  }

  /**
   * Stages the DataFrame on COS as ORC files (one directory per physical
   * table) and then loads the staged data into MPP via an external table.
   * Staged data is removed afterwards even on failure.
   *
   * @param data    DataFrame to import
   * @param workDir COS staging directory
   */
  def loadToCosAndMpp(data: DataFrame,
                      workDir: String) = {
    try {
      // Determine whether the target table is split into sub-tables
      tableSplit = TableSplitUtils.getTableSplit(_kc,
        _mpp_Config.db_name,
        _mpp_Config.table_name,
        _mpp_Config.extender.meta.clazz,
        compact(render(_mpp_Config.extender.auth.params)))
      var splitValues: List[String] = null
      var tblList: List[String] = null

      // Apply column renames and default values
      val sdvData = DataframeUtils.setDefaultValue(_mpp_Config.extract_fields, _mppColsInfo, data)
      val colArr: Array[Column] = sdvData.columns.map(sdvData.col)
      // Resolve the list of physical sub-tables
      if (tableSplit != null) {
        tableSplit.getStrategyType match {
          case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM =>
            val enumValues = tableSplit.getStrategyValue.split(",").toList
            // Resolve the physical table names for the enumerated split values
            tblList = TableSplitUtils.getRealTable(_kc,
              _mpp_Config.db_name,
              _mpp_Config.table_name,
              _mpp_Config.extender.meta.clazz,
              compact(render(_mpp_Config.extender.auth.params)), this, tableSplit, "in", enumValues)
          case TableSplitUtils.StrategyTypeEnum.DATETIME =>
            // Distinct non-null values of the split column, as strings
            splitValues = sdvData.select(colArr: _*).select(col(tableSplit.getSplitColName).cast(StringType)).distinct().collect().map(row => {
              row.getAs[String](tableSplit.getSplitColName)
            }).toList.filter(_ != null)

            tableSplit.getStrategyValue match {
              // 2019-01-01 ==> 2019 / 201901 / 20190101
              case StrategyValueEnum.year => splitValues = splitValues.map(_.replaceAll("-", "").substring(0, 4))
              case StrategyValueEnum.month => splitValues = splitValues.map(_.replaceAll("-", "").substring(0, 6))
              case StrategyValueEnum.day => splitValues = splitValues.map(_.replaceAll("-", "").substring(0, 8))
              case other => throw new IllegalArgumentException(s"Unknown datetime split strategy value: $other")
            }
            tblList = TableSplitUtils.getRealTable(_kc,
              _mpp_Config.db_name,
              _mpp_Config.table_name,
              _mpp_Config.extender.meta.clazz,
              compact(render(_mpp_Config.extender.auth.params)), this, tableSplit, "in", splitValues)
          case TableSplitUtils.StrategyTypeEnum.BUSSINESS =>
            // Business split: the suffix must be explicitly configured and enabled
            var suffixValue = ""
            if (_mpp_Config.sub_table.on_off.trim.toLowerCase == "true") {
              suffixValue = _mpp_Config.sub_table.suffix
              if (suffixValue == null || suffixValue == "") {
                throw new IllegalArgumentException("未正确填写业务分表后缀!")
              }
            } else {
              throw new IllegalArgumentException("未开启业务分表开关!")
            }
            splitValues = List[String](suffixValue)
            tblList = TableSplitUtils.getRealTable(_kc,
              _mpp_Config.db_name,
              _mpp_Config.table_name,
              _mpp_Config.extender.meta.clazz,
              compact(render(_mpp_Config.extender.auth.params)), this, tableSplit, "=", splitValues)
          case other => throw new IllegalArgumentException(s"Unknown table split strategy type: $other")
        }
      }
      val decimal_switch: Boolean = _kc.conf.getBoolean(CommonConstants.HASHDATA_DECIMAL_SWITCH, false)
      log.info(s"mpp decimal_switch: $decimal_switch")
      val dfAndMetaTup = DataframeUtils.buildNewDataframeHashData(tblList: List[String],
        tableSplit,
        _mpp_Config.table_name,
        _mppColsInfo,
        _mpp_Config.extract_fields,
        sdvData,
        connectEntity, decimal_switch)
      val targetData = dfAndMetaTup._1
      mppMeta = dfAndMetaTup._2
      log.info("===> " + targetData.schema.fields.mkString(","))
      // Resolve the physical tables and the DataFrame destined for each
      tblNameAndDF =
        TableSplitUtils.getSinkRealTable(_kc,
          _mpp_Config.db_name,
          _mpp_Config.table_name,
          this,
          _mpp_Config.extender.meta.clazz,
          compact(render(_mpp_Config.extender.meta.params)),
          targetData,
          _mpp_Config.sub_table)
      mppDataStatusInfo = new RelationDataStatusInfo
      // Stage each physical table's DataFrame under its own directory
      for (tblDF <- tblNameAndDF) {
        val numPartition = DataframeUtils.rePartitions(_kc, targetData, _mppColsInfo)
        // Wrap the DataFrame with a row-count accumulator for metric reporting
        val (resultData, accumulator) = DataframeUtils.calculateDataNum(_kc, tblDF._2, "MppSink")
        resultData.coalesce(numPartition).write.format(ORC_FORMAT).mode("Overwrite").save(s"${workDir + "/" + tblDF._1}")
        // Accumulate row counts across all sub-tables
        val numTemp: Long =
          if (mppDataStatusInfo.getDataNum == null) accumulator.value.toLong
          else mppDataStatusInfo.getDataNum.toLong + accumulator.value.toLong
        mppDataStatusInfo.setDataNum(numTemp.toString)
        // Drop the _SUCCESS marker so the external table does not read it
        cosFs.delete(new Path(s"${workDir + "/" + tblDF._1}/_SUCCESS"), true)
      }

      logInfo("缓存数据成功.")

      val fieldList = targetData.schema.fields.toList
      loadToMpp(workDir, fieldList)

    } catch {
      case e: Exception =>
        // Attach the original exception as the cause so the stack trace is preserved
        log.error("缓存数据失败,失败信息:" + e.getMessage + "失败原因:" + e.getCause, e)
        throw new Exception("缓存数据失败,失败信息:" + e.getMessage + "失败原因:" + e.getCause, e)
    } finally {
      // Always remove the staged data
      deleteCacheData(workDir)
    }
  }

  /**
   * Registers the COS credentials on the Spark Hadoop configuration and
   * returns it, so a COS-backed FileSystem can be opened.
   */
  def addCosFileSystem(): org.apache.hadoop.conf.Configuration = {
    val hadoopConf = HadoopCosUtils.appendCosHadoopConfigs(_kc.sparkSession.sparkContext.hadoopConfiguration, cosConfig)
    _kc.sparkSession.sparkContext.hadoopConfiguration.addResource(hadoopConf)
    _kc.sparkSession.sparkContext.hadoopConfiguration
  }


  /**
   * Loads the COS-staged data into MPP through a temporary external table.
   *
   * @param workDir staging directory on COS
   * @param colList field information of the staged data
   */
  def loadToMpp(workDir: String,
                colList: List[StructField]): Unit = {
    val mppTmpExtTable = getMppTmpExtTableName()
    loadData(workDir, mppTmpExtTable, tblName, colList)
  }


  /**
   * Builds the (quoted) name of the temporary MPP external table, made
   * unique per job instance.
   *
   * @return quoted external table name
   */
  def getMppTmpExtTableName(): String = {
    val jobId = _kc.conf.getString("job.inst.id")
    s"""\"${_mpp_Config.table_name}_${jobId}\""""
  }

  /**
   * Creates the OSS-backed external table, copies the staged data into the
   * target table and reports data-status metrics on success.
   *
   * @param workDir    COS staging directory
   * @param cosTblName external (COS) table name
   * @param mppTblName internal MPP table name
   * @param colList    field information of the staged data
   */
  def loadData(workDir: String,
               cosTblName: String,
               mppTblName: String,
               colList: List[StructField]): Unit = {
    logInfo("开始缓存加载数据至mpp...")
    val url = getConnectUrl()
    val user = _username
    val password = _password
    // External-table column definition clause
    val decimal_switch: Boolean = _kc.conf.getBoolean(CommonConstants.HASHDATA_DECIMAL_SWITCH, false)
    val extTblFieldDefinedStr = DataframeUtils.getMppExtTableFieldDefined(_mpp_Config.extract_fields, decimal_switch)

    // Unique flag for this run: <job instance id>_<start timestamp>
    val externalFlag = _kc.conf.getString("job.inst.id") + "_" + currtime
    // Assemble the full DDL + copy SQL (parameters documented on the callee)
    val externalTableDDL: String = MppUtils.getOssExternalTable(
      tblNameAndDF,
      dbName,
      extTblFieldDefinedStr,
      ORC_FORMAT,
      DELIMETER,
      cosConfig.getAccessKey,
      cosConfig.getSecretKey,
      cosConfig.getAppId,
      getLocation(workDir),
      OSS_TYPE,
      cosTblName,
      DataframeUtils.getFieldConvertExps(tableSplit, tblNameAndDF, colList, connectEntity, mppMeta),
      DataframeUtils.getTableFieldNames(colList),
      externalFlag)

    logInfo("totalSQL ==> " + externalTableDDL)

    // Execute the copy statements
    val sqlList = List(externalTableDDL)
    val executeRs = MppUtils.executeSqls(url, user, password, sqlList)
    if (executeRs) {
      logInfo("加载缓存数据至mpp成功")
      // Report row-count status for the target table
      DataframeUtils.reportDataStatusRelation(_kc, mppDataStatusInfo, dbName, tblName, _mpp_Config.extender.meta.clazz,
        compact(render(_mpp_Config.extender.meta.params)))
      // Report metrics to the operations center
      val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(_kc)
      metric.setProcessDataLValue(mppDataStatusInfo.getDataNum.toLong)
      CenterMetricUtils.reportSyncProcessData(metric, _kc)
    } else {
      throw new Exception("加载缓存数据至mpp失败")
    }
  }

  /**
   * Deletes the staged data from COS, if any.
   *
   * @param workDir staging directory to remove
   */
  def deleteCacheData(workDir: String): Unit = {
    logInfo("开始删除缓存数据...")
    val path = new Path(workDir)
    if (cosFs.exists(path)) {
      // Recursive delete; same semantics as the deprecated single-arg delete(Path)
      if (cosFs.delete(path, true)) {
        logInfo("删除缓存数据成功!")
      } else {
        log.info("删除缓存数据失败!")
      }
    } else {
      log.info("无缓存数据可删除!")
    }

  }

  /**
   * Builds the physical COS location (endpoint/bucket/workDir) of the
   * staged data, as expected by the external-table DDL.
   *
   * @param workDir staging directory (with or without a leading slash)
   * @return location string
   */
  def getLocation(workDir: String): String = {
    val bucket = cosConfig.getBucket
    val endpoint = cosConfig.getEndPoint
    // Avoid a double slash when workDir is already absolute
    val location =
      if (workDir.startsWith("/")) s"${endpoint}/${bucket}${workDir}"
      else s"${endpoint}/${bucket}/${workDir}"
    log.info(s"LOCATION: ${location}")
    location
  }

  /**
   * Builds the PostgreSQL-protocol JDBC URL for the MPP instance.
   *
   * @return JDBC connection URL
   */
  def getConnectUrl(): String = {
    val host = _host
    val port = _port
    val dbName = _instansename
    val url = s"jdbc:postgresql://${host}:${port}/${dbName}"
    log.info("url: " + url)
    url
  }
}
