package com.kingsoft.dc.khaos.module.spark.source

import java.util.Properties

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MysqlConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, MySQLConfig, RdbmsAdvancedOption, SplitOption}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.{DataframeUtils, MetaUtils, TableSplitUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.ArrayBuffer

/**
  * MySQL source strategy: reads a MySQL table into a Spark DataFrame.
  *
  * Two read modes are supported, chosen by the "advanced split" options in the
  * module config:
  *  - field split ("split_with_fields"): partitioned jdbc read bounded by the
  *    min/max of a numeric split field;
  *  - paging (default / fallback): the table is read as a union of
  *    `limit offset, rows` pages.
  */
class MysqlSourceDbs extends SourceStrategy with Logging {

  // Connection metadata resolved through the meta extender (host/port/credentials).
  private var _mysqlMeta: MetaDataEntity = null
  // Full module configuration parsed from the JSON config string.
  private var _mysqlConfig: MySQLConfig = null

  private val _driver = "com.mysql.jdbc.Driver"
  private var _ip = ""
  private var _port = ""
  private var _userName = ""
  private var _passWord = ""
  private var _jdbcUrl = ""

  // Extra JDBC url suffix (e.g. "?useSSL=false"), from module properties.
  private var _jdbc_url_param = ""
  // NOTE(review): this default ("1000") is overwritten with default "10000" in
  // loadProperties — confirm which default is intended.
  private var _jdbc_fetch_size = "1000"
  // Shard size fed to DataframeUtils.estimateTaskSegmentation in estimate mode.
  private var _jdbc_sharding_size = 200

  private var dbName = ""
  private var tblName = ""
  // Optional row filter (a SQL WHERE fragment); may be null or blank.
  private var filter = ""
  private var advancedOptions: RdbmsAdvancedOption = _
  private var extractFields: List[ExtractFieldInfo] = _

  // Comma-separated projection list built from extractFields.
  private var selectedFields = ""
  private var executeTableDataSql = ""
  private var _splite_mode: SplitOption = null

  /**
    * Entry point: initializes state from `config`, builds the JDBC url and
    * query, and reads the table using field-split or paging mode.
    */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {
    // Initialize configuration, module properties and connection info.
    init(kc, config)

    // Build the SELECT projection; MySQL-specific TIME columns are converted
    // to strings via time_format.
    selectedFields = getSelectFields()

    // Full table query with row filtering and column pruning applied.
    executeTableDataSql = generateTableQuery(tblName)

    _jdbcUrl = s"jdbc:mysql://${_ip}:${_port}/$dbName${_jdbc_url_param}"
    log.info(s"jdbc => ${_jdbcUrl}, executeTableDataSql => $executeTableDataSql")
    val prop = getJDBCProp()

    // Field-split mode only when the advanced switch is on AND the option is
    // "split_with_fields". BUGFIX: use `exists` instead of `.get` so a missing
    // split_options falls back to paging instead of throwing NoSuchElementException.
    val splitWithFields = advancedOptions.split_mode.nonEmpty &&
      _splite_mode.on_off.equals(true) &&
      _splite_mode.split_options.exists(_.equalsIgnoreCase("split_with_fields"))

    if (splitWithFields) {
      log.info("高级功能切分设置打开，按字段切分模式正在开启...")
      getAdvancedOnDF(kc, _mysqlConfig.extender.meta.clazz,
        compact(render(_mysqlConfig.extender.meta.params)), prop, _splite_mode, extractFields)
    } else {
      getAdvancedOffDF(kc, _mysqlConfig.extender.meta.clazz,
        compact(render(_mysqlConfig.extender.meta.params)), prop)
    }
  }

  /** Initializes all internal state: config fields, module properties, connection. */
  def init(kc: KhaosContext, config: String): Unit = {
    implicit val formats = DefaultFormats
    // Parse the mysql module configuration.
    initMysqlConf(config)

    // Load module-level properties (url params, fetch size, shard size).
    loadProperties(kc)

    // Resolve the mysql connection (host/port/user/password) via metadata.
    initMysqlConnect(kc, config)
  }

  /**
    * Advanced split enabled: use the partitioned read when the split params
    * validate, otherwise fall back to paging mode.
    * `metaClazz` / `metaParamsJson` are currently unused but retained for
    * interface compatibility.
    */
  def getAdvancedOnDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, prop: Properties, splitMode: SplitOption, extractFields: List[ExtractFieldInfo]): DataFrame = {
    val splitField = splitMode.split_field
    val splitNums = splitMode.split_nums
    // Validate the split field/count before committing to partitioned mode.
    if (checkSplitParams(splitField, splitNums, extractFields)) {
      log.info("高级功能切分模式开启成功!")
      getPartitionSqlDF(kc, prop, tblName, splitField, splitNums.toInt)
    } else {
      log.warn("高级功能切分模式开启失败，转为分页模式执行！")
      getPagingSqlDF(kc, prop, tblName)
    }
  }

  /**
    * Advanced split disabled: read the table with the paging strategy.
    * `metaClazz` / `metaParamsJson` are unused but retained for interface
    * compatibility.
    *
    * @return the table as a DataFrame
    */
  def getAdvancedOffDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, prop: Properties): DataFrame =
    getPagingSqlDF(kc, prop, tblName)

  /**
    * Partitioned read using Spark's jdbc lower/upper-bound API, based on the
    * min/max of an indexed or primary-key split field.
    *
    * @param splitField numeric column used as partition column
    * @param splitNums  number of partitions
    */
  def getPartitionSqlDF(kc: KhaosContext, prop: Properties, oneTableName: String, splitField: String, splitNums: Int): DataFrame = {
    val rangeSql = generateFieldBoundQuery(oneTableName, splitField)
    log.info(s"rangeSql => $rangeSql")
    val rangeDF = kc.sparkSession.read.jdbc(_jdbcUrl, rangeSql, prop)
    // Cast bounds to Long so they can feed the partitioned jdbc reader.
    val bounds = getLongBoundDF(rangeDF).collect

    // NOTE(review): max()/min() over an empty table yield SQL NULL; the cast
    // below then appears to unbox to 0L — confirm that is acceptable here.
    val maxNum = bounds(0)(0).asInstanceOf[Long]
    val minNum = bounds(0)(1).asInstanceOf[Long]
    val oneExecuteTableDataSql = generateTableQuery(oneTableName)
    kc.sparkSession.read.jdbc(_jdbcUrl, oneExecuteTableDataSql, splitField, minNum, maxNum, splitNums, prop)
  }

  /**
    * Paging read: `limit 0,N`, `limit N,N`, ... unioned into one DataFrame.
    * Also records the total row count in the Khaos conf.
    */
  def getPagingSqlDF(kc: KhaosContext, prop: Properties, oneTableName: String): DataFrame = {
    val countSql = getCountTableSql(oneTableName)
    val countDF = kc.sparkSession.read.jdbc(_jdbcUrl, countSql, prop).collect()
    val count = countDF(0)(0).asInstanceOf[Long]
    val nonExecuteTableDataSql = getTableDataSql(oneTableName)
    val sampleDF = getSampleDF(kc, oneTableName, prop, count)
    val sqls = getPagingSqls(kc, nonExecuteTableDataSql, count, sampleDF)
    log.info(s"countSql: $countSql => $count")
    kc.conf.set(CommonConstants.DATA_COUNT_SET_NUMS, String.valueOf(count))

    // BUGFIX: the original located each page with `sqls.indexOf(sql)`, which
    // returns the FIRST occurrence; duplicate page sqls (possible when the
    // page size is 0) reset the accumulator instead of being unioned.
    // Union by position instead. `sqls` is never empty (>= 1 segment).
    sqls.map(sql => kc.sparkSession.read.jdbc(_jdbcUrl, sql, prop)).reduce(_ union _)
  }

  /** Builds the `count(1)` query (with the optional row filter applied). */
  def getCountTableSql(tblName: String): String = {
    val countSql =
      if (StringUtils.isNotBlank(filter)) s"(select count(1) from $tblName where $filter) mysqlCount"
      else s"(select count(1) from $tblName) mysqlCount"
    log.info(s"countSql => $countSql")
    countSql
  }

  /**
    * Draws 10 random single-row samples from the table (used by the estimate
    * split mode to approximate row size).
    */
  def getSampleDF(kc: KhaosContext, tableName: String, prop: Properties, count: Long): DataFrame = {
    // BUGFIX: upper bound for the random offset. Guards count < 1 (nextInt
    // requires a positive bound) and count > Int.MaxValue (the original
    // count.toInt could wrap negative and make nextInt throw).
    val bound = Math.max(1L, Math.min(count, Int.MaxValue.toLong)).toInt
    val samples = (0 until 10).map { _ =>
      val randNum = scala.util.Random.nextInt(bound)
      val sampleSql = s"(select * from $tableName limit $randNum,1) mysqlSample"
      log.info(s"sampleSql => $sampleSql")
      kc.sparkSession.read.jdbc(_jdbcUrl, sampleSql, prop)
    }
    samples.reduce(_ union _)
  }

  /**
    * Builds the list of paging SQL statements covering all `count` rows.
    * Segment count comes from the estimate split mode when enabled, otherwise
    * from `spark.dynamicAllocation.maxExecutors`.
    */
  def getPagingSqls(kc: KhaosContext, baseSql: String, count: Long, sampleData: DataFrame): ArrayBuffer[String] = {
    val sqls = new ArrayBuffer[String]
    var segmentations = 0
    // BUGFIX: use `exists` instead of chained `.get` so absent Options cannot
    // throw NoSuchElementException here.
    if (advancedOptions.split_mode.nonEmpty && _splite_mode.on_off.equals(true) &&
        _splite_mode.split_options.exists(_.equalsIgnoreCase("split_with_estimate"))) {
      log.info("高级功能切分设置打开，按估算切分模式开启...")
      segmentations = DataframeUtils.estimateTaskSegmentation(count, sampleData, _jdbc_sharding_size)
    } else {
      log.info("高级功能切分设置关闭，按默认资源数切分模式开启...")
      // 2021-09-24 enterprise-cloud 2.0.8: fixes the oracle2hive sync count
      // performance issue by sizing pages from the executor count.
      segmentations = kc.sparkSession.conf.get("spark.dynamicAllocation.maxExecutors", "2").toInt
    }
    // Guard against a zero/negative segment count (division by zero below).
    segmentations = Math.max(1, segmentations)
    logInfo("segmentations: " + segmentations)
    // BUGFIX: `count / segmentations` was integer division, so Math.ceil was a
    // no-op and every page but the last was floor-sized.
    val partitionNum = Math.ceil(count.toDouble / segmentations).toInt
    logInfo("partitionNum: " + partitionNum)
    for (step <- 0 until segmentations) {
      val begin = step.toLong * partitionNum
      // Clamp the last page so it never requests a negative row count when
      // the ceil-sized pages already cover the whole table.
      val rows = if (step == segmentations - 1) Math.max(0L, count - begin) else partitionNum.toLong
      val sql = s"($baseSql limit $begin, $rows) pagingSql"
      sqls += sql
      log.info(s"Paging Sql: $step => $sql")
    }
    sqls
  }

  /** Builds the (un-aliased) projection query, with the optional row filter. */
  def getTableDataSql(tblName: String): String =
    if (StringUtils.isNotBlank(filter)) s"select $selectedFields from $tblName where $filter"
    else s"select $selectedFields from $tblName"

  /** Builds the aliased table query used as the jdbc `table` argument. */
  def generateTableQuery(tblName: String): String =
    if (StringUtils.isNotBlank(filter)) s"(select $selectedFields from $tblName where $filter) MySQLSourceTmp"
    else s"(select $selectedFields from $tblName) MySQLSourceTmp"

  /** Builds the max/min query for the split field (advanced split mode). */
  def generateFieldBoundQuery(tblName: String, splitField: String): String =
    if (StringUtils.isNotBlank(filter)) s"(select max($splitField),min($splitField) from ${tblName} where $filter) MySQLFieldBoundTmp"
    else s"(select max($splitField),min($splitField) from ${tblName}) MySQLFieldBoundTmp"

  /** JDBC connection properties (driver, credentials, fetch size). */
  def getJDBCProp(): Properties = {
    val prop = new Properties
    prop.put("driver", _driver)
    prop.put("user", _userName)
    prop.put("password", _passWord)
    prop.put("fetchsize", _jdbc_fetch_size)
    prop
  }

  /** Parses the JSON config into `_mysqlConfig` and caches its fields. */
  def initMysqlConf(config: String): Unit = {
    implicit val formats = DefaultFormats

    _mysqlConfig = parse(config, true).extract[MySQLConfig]
    dbName = _mysqlConfig.db_name
    tblName = _mysqlConfig.table_name
    filter = _mysqlConfig.filter
    advancedOptions = _mysqlConfig.advanced_options
    extractFields = _mysqlConfig.extract_fields
    // Default SplitOption so _splite_mode is never null downstream.
    _splite_mode = _mysqlConfig.advanced_options.split_mode.getOrElse(SplitOption())
  }

  /**
    * Resolves the MySQL connection (host/port/credentials) through the meta
    * service. PRECONDITION: initMysqlConf must have run first — this method
    * reads `_mysqlConfig` (the original also re-parsed `config` redundantly;
    * that parse was dropped since `_mysqlConfig` is required below anyway).
    */
  def initMysqlConnect(kc: KhaosContext, config: String): Unit = {
    val PROJECT_ID: Int = kc.conf.getString(SchedulerConstants.PROJECT_ID).toInt
    // Inject the project id into the meta params before calling the meta service.
    var metaParamsMap: Map[String, Any] = _mysqlConfig.extender.meta.params.values
    metaParamsMap = metaParamsMap.updated("project_id", PROJECT_ID)
    import org.json4s.DefaultFormats
    import org.json4s.native.Json
    val metaJson: String = Json(DefaultFormats).write(metaParamsMap)

    _mysqlMeta = MetaUtils.getMysqlDs(kc,
      _mysqlConfig.db_name,
      _mysqlConfig.table_name,
      _mysqlConfig.extender.meta.clazz,
      metaJson,
      this)

    val connect = _mysqlMeta.dsMysqlConnect
    _ip = connect.getHost
    _port = connect.getPort
    _userName = connect.getUserName
    _passWord = connect.getPassWord
  }

  /**
    * Builds the comma-separated projection; MySQL TIME columns are wrapped in
    * time_format so they arrive as strings.
    */
  def getSelectFields(): String =
    extractFields.map { ef =>
      if (ef.data_type.equalsIgnoreCase("TIME")) s"time_format(${ef.field}, '%T') as ${ef.field}"
      else ef.field
    }.mkString(",")

  /**
    * Loads module-level properties (prefix "module.mysql.source.").
    *
    * @param kc Khaos context carrying the configuration
    */
  def loadProperties(kc: KhaosContext): Unit = {
    val mysqlProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.mysql.source.").toMap
    log.info("MysqlSource Properties")
    mysqlProperties.foreach { case (k, v) => log.info(k + "   " + v) }
    _jdbc_url_param = mysqlProperties.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_URL_PARAM, "")
    _jdbc_fetch_size = mysqlProperties.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_FETCHSIZE, "10000")
    _jdbc_sharding_size = mysqlProperties.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_PAGING_SIZE, "200").toInt
  }

  /**
    * Validates the field-split parameters: the split field must be a NUMBER
    * column from the extract list and the split count an integer in [1, 1000].
    *
    * @param splitField    split column name
    * @param splitNums     split count (string from config)
    * @param extractFields source table field metadata
    * @return true when both parameters validate
    */
  def checkSplitParams(splitField: String, splitNums: String, extractFields: List[ExtractFieldInfo]): Boolean = {
    var result = false
    if (!StringUtils.isBlank(splitField) && !StringUtils.isBlank(splitNums)) {
      try {
        val fieldIsNumeric = extractFields.exists(ef => splitField.equals(ef.field) && ef.data_type.equals("NUMBER"))
        if (fieldIsNumeric) {
          val nums = splitNums.toInt
          if (1 <= nums && nums <= 1000) {
            logInfo(s"切分参数校验通过！切分数量为：$splitNums，切分字段为：$splitField")
            result = true
          } else {
            log.error(s"切分数量输入有误，目前只支持1到1000的整数！实际输入数量为：$splitNums")
          }
        } else {
          // BUGFIX: the original also logged this field error when the field
          // matched but the count was out of range, which was misleading.
          log.error(s"切分字段输入有误，目前只支持整型字段！实际输入字段为：$splitField")
        }
      } catch {
        case e: Exception =>
          // Keep the throwable so the root cause (e.g. NumberFormatException) is visible.
          log.error(s"切分字段或切分数量输入有误，切分字段目前只支持整型字段，切分数量只支持1到1000的整数！实际输入数量为：$splitNums，实际输入字段为：$splitField", e)
      }
    }
    result
  }

  /**
    * Casts all bound columns to Long so min/max values can feed the
    * partitioned jdbc read API. In MySQL the bound columns may arrive as
    * IntegerType, LongType or DecimalType(20,0); in Oracle as DecimalType(38,10).
    *
    * @param df bound DataFrame with arbitrary numeric column types
    * @return the same DataFrame with every column cast to Long
    */
  def getLongBoundDF(df: DataFrame): DataFrame = {
    // Columns not already Long. (The original condition also tested
    // DecimalType(38, 10) explicitly, but that case is subsumed by
    // "not LongType", so the check is simplified.)
    val toCast = df.schema.collect { case f if f.dataType != LongType => f.name }
    toCast.foldLeft(df)((acc, name) => acc.withColumn(name, col(name).cast(LongType)))
  }

  /** Builds the Khaos schema (field name + declared type) from the config. */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats = DefaultFormats
    val mySQLInfo = parse(config, true).extract[MySQLConfig]
    mySQLInfo.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
  }
}
