package com.kingsoft.dc.khaos.module.spark.source

import java.util.Properties
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.extender.meta.model.ds.MysqlConnect
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MetaDataConstants, MysqlConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ShardingSourceInfo, _}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.{DataframeUtils, MetaUtils, TableSplitUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.{Failure, Success, Try}

// Spark source strategy that reads a MySQL table — optionally sharded across
// multiple databases/physical tables — into a DataFrame.
class MysqlSource extends SourceStrategy with Logging {

  // Table metadata and parsed module configuration (populated in init()).
  private var _mysqlMeta: MetaDataEntity = _
  private var _mysqlConfig: MySQLConfig = _

  // JDBC connection settings resolved from metadata / properties.
  private var _driver = ""
  private var _ip = ""
  private var _port = ""
  private var _userName = ""
  private var _passWord = ""
  private var _jdbcUrl = ""

  // Extra JDBC url parameters, fetch size and paging-estimate size (MB),
  // loaded from `module.mysql.source.*` configuration in loadProperties().
  private var _jdbc_url_param = ""
  private var _jdbc_fetch_size = "1000"
  private var _jdbc_sharding_size = 200

  // Logical db/table names derived from the module config.
  private var dbName = ""
  private var tblName = ""
  private var dbAndTbl = ""
  // Backtick-quoted `db`.`table` form used when building SQL.
  private var pdbAndTbl = ""
  // Optional row filter (SQL WHERE fragment); empty/blank means no filter.
  private var filter = ""
  // Sharding (multi db/table) mode switch and its pattern configuration.
  private var shardingSwitch = false
  private var shardingSourceList: List[ShardingSourceInfo] = _
  private var shardingDb = ""
  private var shardingDbKey = ""
  private var shardingDbType = ""
  private var shardingDbValue = ""
  private var shardingTable = ""
  private var shardingTableKey = ""
  private var shardingTableType = ""
  private var shardingTableValue = ""
  // Sharding names must follow the `prefix#variable#suffix` convention.
  private val regexKey = ".*#.*#.*"
  // Advanced options (split mode) and the source column list from config.
  private var advancedOptions: RdbmsAdvancedOption = _
  private var extractFields: List[ExtractFieldInfo] = _
  // Current split strategy name (fields / estimate / cu); mutated during read.
  private var splitOptions: String = ""


  // Projection list and composed query, initialised in read().
  private var selectedFields = ""
  private var executeTableDataSql = ""
  // Per-sharding-source JDBC url / properties, filled by initShardingDbAndTb().
  private val shardingJDBCUrlArr = new ArrayBuffer[String]()
  private val shardingJDBCPropArr = new ArrayBuffer[Properties]()
  // Matched physical `db`.`table` names for the current sharding source.
  private var cartesianProdTable: ListBuffer[String] = _
  private var _splite_mode: SplitOption = _

  /**
   * Entry point: dispatch on the configured metadata origin and delegate to
   * the matching reader implementation.
   *
   * @param kc         job context (configuration + SparkSession)
   * @param module_id  id of this module instance
   * @param config     module JSON configuration
   * @param dependence upstream dependency descriptor
   * @return the DataFrame read from MySQL
   * @throws Exception when the metadata origin value is unknown
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {
    // Never reassigned — val instead of the original var.
    val meta_origin: String = kc.conf.getString(SchedulerConstants.META_ORIGIN, MetaDataConstants.META_DATAMANAGEMENT)
    meta_origin match {
      case MetaDataConstants.META_DATAMANAGEMENT =>
        this.read(kc, module_id, config, dependence)
      case MetaDataConstants.META_DBSERVER =>
        // dbserver-backed metadata uses a dedicated reader.
        val dbs: MysqlSourceDbs = new MysqlSourceDbs
        dbs.source(kc, module_id, config, dependence)
      case _ =>
        throw new Exception(s"mysqlSource meta_origin 参数错误! ==> $meta_origin")
    }
  }

  /**
   * Read the configured table via the data-management metadata path.
   * Chooses a strategy from (sharding on/off) x (advanced split mode), then
   * delegates to the matching getXxxDF helper.
   */
  def read(kc: KhaosContext,
           module_id: String,
           config: String,
           dependence: Dependency): DataFrame = {
    // Initialise config, properties, auth check and connection info.
    init(kc, config)

    // Build the projection list; MySQL TIME columns are converted to strings.
    // Initialises the selectedFields member used by the SQL builders below.
    selectedFields = getSelectFields

    // Compose the subquery applying the row filter and column pruning.
    executeTableDataSql = generateTableQuery(dbAndTbl)

    _jdbcUrl = s"jdbc:mysql://${_ip}:${_port}${_jdbc_url_param}"
    log.info(s"driver => ${_driver}, jdbc => ${_jdbcUrl}, executeTableDataSql => $executeTableDataSql")
    val prop: Properties = getJDBCProp


    // Strategy selection: advanced split switch x sharding switch.
    var df: DataFrame = null
    splitOptions = _splite_mode.split_options.getOrElse("")
    if (shardingSwitch) {
      // Sharding enabled.
      log.info("分库分表模式打开")
      initShardingDbAndTb(kc)
      if (advancedOptions.split_mode.nonEmpty && _splite_mode.on_off) {
        // Advanced split enabled.
        log.info("切分设置打开")
        // NOTE(review): no default case — an unexpected split_options value
        // would raise a MatchError here. Confirm upstream validation only
        // allows FIELDS / ESTIMATE when the split switch is on.
        splitOptions match {
          case MysqlConstants.SPLIT_WITH_FIELDS =>
            // Split by a numeric field's min/max bounds.
            log.info("按字段切分")
            df = getShardingSplitWithFieldDF(kc, df)
          case MysqlConstants.SPLIT_WITH_ESTIMATE =>
            // Estimate-based paging.
            log.info("按估值切分")
            df = getShardingSplitWithEstimateDF(kc, df)
        }

      } else {
        // Split disabled: page by resource (CU) count.
        log.info("切分设置关闭")
        log.info("按照资源数切分")
        df = getShardingSplitWithCuDF(kc, df)
      }
    } else {
      // Sharding disabled: single db.table read.
      log.info("分库分表模式关闭")
      if (advancedOptions.split_mode.nonEmpty && _splite_mode.on_off) {
        // Advanced split enabled.
        log.info("切分设置打开")
        // NOTE(review): same non-exhaustive match as above — confirm inputs.
        splitOptions match {
          case MysqlConstants.SPLIT_WITH_FIELDS =>
            // Split by field.
            log.info("按字段切分")
            df = getSplitWithFieldDF(kc, _mysqlConfig.extender.meta.clazz, compact(render(_mysqlConfig.extender.meta.params)), _jdbcUrl, prop, _splite_mode, extractFields)
          case MysqlConstants.SPLIT_WITH_ESTIMATE =>
            // Estimate-based paging.
            log.info("按估值切分")
            df = getSplitWithEstimateDF(kc, _mysqlConfig.extender.meta.clazz, compact(render(_mysqlConfig.extender.meta.params)), _jdbcUrl, prop)
        }
      } else {
        // Split disabled: page by resource (CU) count.
        log.info("切分设置关闭")
        log.info("按照资源数切分")
        df = getSplitWithCuDF(kc, _mysqlConfig.extender.meta.clazz, compact(render(_mysqlConfig.extender.meta.params)), _jdbcUrl, prop)
      }
    }

    df
  }


  /** Initialise per-run state: config parsing, properties, auth check and connection info. */
  def init(kc: KhaosContext, config: String): Unit = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    // Parse the module JSON config into _mysqlConfig and derived members.
    initMysqlConf(config)

    // Load `module.mysql.source.*` settings from the job configuration.
    loadProperties(kc)

    // Abort early when the job has no read permission on the table.
    checkReadRight(kc)

    // Resolve host/port/credentials from metadata.
    initMysqlConnect(kc)

  }

  /** Resolve the JDBC url and connection properties for every sharding data source. */
  def initShardingDbAndTb(kc: KhaosContext): Unit = {
    shardingSourceList.foreach { src =>
      shardingJDBCUrlArr.append(initShardingMysqlUrl(kc, src.dsName, src.dsId, src.dataType))
      shardingJDBCPropArr.append(getJDBCProp)
    }
  }


  /**
   * Read the table with the advanced "split by field" mode enabled
   * (non-sharding path). Falls back to paging when the split parameters are
   * invalid, and handles physical table splits (enum / datetime) when the
   * metadata declares them.
   *
   * @param splitMode     advanced split settings (field + partition count)
   * @param extractFields source columns, used to validate the split field
   */
  def getSplitWithFieldDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties, splitMode: SplitOption, extractFields: List[ExtractFieldInfo]): DataFrame = {
    // Return type annotation normalised to the imported DataFrame alias
    // (was the inconsistent fully-qualified _root_ form).
    var df: DataFrame = null

    val tableSplit: DmTableSplit = TableSplitUtils.getTableSplit(kc, dbName, tblName, metaClazz, metaParamsJson)
    // No physical table split declared.
    if (tableSplit == null) {

      val splitField: String = splitMode.split_field
      val splitNums: String = splitMode.split_nums
      // Validate the split field/count before using the partitioned read.
      val splitParamsCheck: Boolean = checkSplitParams(splitField, splitNums, extractFields)
      if (splitParamsCheck) {
        log.info("高级功能切分模式开启成功!")
        df = getPartitionSqlDF(kc, jdbcUrl, prop, dbAndTbl, splitField, splitNums.toInt)
      } else {
        log.warn("高级功能切分模式开启失败，转为分页模式执行！")
        df = getPagingSqlDF(kc, jdbcUrl, prop, dbAndTbl)
      }
    } else { // Table split declared — must use the resolved physical names, not the member tblName!
      // NOTE(review): no default case — an unknown strategy type raises MatchError.
      tableSplit.getStrategyType match {
        // Enum-based split: multiple physical tables.
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM => {
          val tblNameList: List[String] = getShardingEnumTableList(kc, tableSplit, metaClazz, metaParamsJson)
          df = getShardingEnumTableDF(kc, jdbcUrl, prop, tblNameList, splitMode)
        }
        // Datetime split: read the physical table matching the batch date.
        case TableSplitUtils.StrategyTypeEnum.DATETIME => {
          val tblNameList: List[String] = getShardingDatetimeTableList(kc, tableSplit, metaClazz, metaParamsJson)
          df = getShardingDatetimeTablesDF(kc, jdbcUrl, prop, tblNameList, splitMode)
        }
      }
    }
    df
  }

  /**
   * Read with the advanced split mode disabled: paging read, honouring any
   * physical table split (enum / datetime) declared in metadata.
   */
  def getAdvancedOffDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    var df: DataFrame = null
    val tableSplit: DmTableSplit = TableSplitUtils.getTableSplit(kc, dbName, tblName,
      metaClazz,
      metaParamsJson)
    // No physical table split declared: plain paging read.
    if (tableSplit == null) {
      df = getPagingSqlDF(kc, jdbcUrl, prop, dbAndTbl)
    } else { // Table split declared.
      // NOTE(review): no default case — an unknown strategy type raises MatchError.
      tableSplit.getStrategyType match {
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM => { // Enum split: many tables.
          val tblNameList: List[String] = getShardingEnumTableList(kc, tableSplit, metaClazz, metaParamsJson)
          df = getShardingEnumTableDF(kc, jdbcUrl, prop, tblNameList, null)
        }
        // Datetime split: single table for the batch date.
        case TableSplitUtils.StrategyTypeEnum.DATETIME => {
          val tblNameList: List[String] = getShardingDatetimeTableList(kc, tableSplit, metaClazz, metaParamsJson)
          df = getShardingDatetimeTablesDF(kc, jdbcUrl, prop, tblNameList, null)
        }
      }
    }
    df
  }

  /** Estimate-based paging read (non-sharding): same code path as the advanced-off read. */
  def getSplitWithEstimateDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    getAdvancedOffDF(kc, metaClazz, metaParamsJson, jdbcUrl, prop)
  }

  /** Resource-count (CU) paging read (non-sharding): forces splitOptions to SPLIT_WITH_CU first. */
  def getSplitWithCuDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    splitOptions = MysqlConstants.SPLIT_WITH_CU
    getAdvancedOffDF(kc, metaClazz, metaParamsJson, jdbcUrl, prop)
  }


  /** Resource-count (CU) paging read across sharding sources: forces splitOptions to SPLIT_WITH_CU first. */
  def getShardingSplitWithCuDF(kc: KhaosContext, data: DataFrame): DataFrame = {
    splitOptions = MysqlConstants.SPLIT_WITH_CU
    getShardingPagingDF(kc, data)
  }

  /** Estimate-based paging read across sharding sources. */
  def getShardingSplitWithEstimateDF(kc: KhaosContext, data: DataFrame): DataFrame = {
    getShardingPagingDF(kc, data)
  }

  /**
   * Paging read across all sharding data sources: every matched physical
   * `db`.`table` is read via getPagingSqlDF and the non-empty results are
   * unioned. Emptiness is probed with Try(first()) because a failing first()
   * is how an empty/unreadable page manifests here.
   *
   * Fixes vs the original: leftover debug log removed; the candidate DataFrame
   * is built once per table instead of up to three times; zipWithIndex replaces
   * the O(n) / duplicate-unsafe indexOf lookup.
   */
  def getShardingPagingDF(kc: KhaosContext, data: DataFrame): DataFrame = {
    var resultData: DataFrame = data
    for (i <- shardingSourceList.indices) {
      // Reset db/table patterns — getCartesianProd mutates them per source.
      shardingDb = _mysqlConfig.shardingDb
      shardingTable = _mysqlConfig.shardingTable
      cartesianProdTable = getCartesianProd(kc, i)
      for ((tb, index) <- cartesianProdTable.zipWithIndex) {
        if (index == 0 && i == 0) {
          resultData = getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb)
        } else {
          val candidate: DataFrame = getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb)
          Try(resultData.first()) match {
            case Failure(_) =>
              // Accumulated result is empty so far: restart from this table.
              resultData = candidate
            case Success(_) =>
              // Skip empty candidates, union the rest.
              if (Try(candidate.first()).isSuccess) {
                resultData = resultData.union(candidate)
              }
          }
        }
      }
    }
    resultData
  }

  /**
   * Field-partitioned read across all sharding data sources: every matched
   * physical `db`.`table` is read via getPartitionSqlDF and the non-empty
   * results are unioned. Emptiness is probed with Try(first()).
   *
   * Fixes vs the original: the candidate DataFrame is built once per table
   * instead of up to three times; zipWithIndex replaces the O(n) /
   * duplicate-unsafe indexOf lookup; dead commented-out code removed.
   *
   * @param splitField numeric column used for partition bounds
   * @param splitNums  partition count (string form, parsed with toInt)
   */
  def getShardingPartitionDF(kc: KhaosContext, data: DataFrame, splitField: String, splitNums: String): DataFrame = {
    var resultData: DataFrame = data
    for (i <- shardingSourceList.indices) {
      // Reset db/table patterns — getCartesianProd mutates them per source.
      shardingDb = _mysqlConfig.shardingDb
      shardingTable = _mysqlConfig.shardingTable
      cartesianProdTable = getCartesianProd(kc, i)
      for ((tb, index) <- cartesianProdTable.zipWithIndex) {
        if (index == 0 && i == 0) {
          resultData = getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt)
        } else {
          val candidate: DataFrame = getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt)
          Try(resultData.first()) match {
            case Failure(_) =>
              // Accumulated result is empty so far: restart from this table.
              resultData = candidate
            case Success(_) =>
              // Skip empty candidates, union the rest.
              if (Try(candidate.first()).isSuccess) {
                resultData = resultData.union(candidate)
              }
          }
        }
      }
    }
    resultData
  }

  /**
   * Sharding read with the "split by field" mode: validates the split
   * parameters and either partitions by the field's bounds or falls back to
   * the CU paging read.
   */
  def getShardingSplitWithFieldDF(kc: KhaosContext, data: DataFrame): DataFrame = {
    var resultData: DataFrame = data
    // Use the split mode already extracted in initMysqlConf instead of
    // Option.get, which would throw NoSuchElementException if split_mode
    // were ever absent. Callers guard on split_mode.nonEmpty, so the value
    // is identical.
    val splitMode: SplitOption = _splite_mode
    val splitField: String = splitMode.split_field
    val splitNums: String = splitMode.split_nums
    // Validate the split field/count before using the partitioned read.
    val splitParamsCheck: Boolean = checkSplitParams(splitField, splitNums, extractFields)
    if (splitParamsCheck) {
      log.info("高级功能切分模式开启成功!")
      resultData = getShardingPartitionDF(kc, resultData, splitField, splitNums)
    } else {
      log.warn("高级功能切分模式开启失败，估算/分页查询！")
      resultData = getShardingSplitWithCuDF(kc, resultData)
    }
    resultData
  }

  /**
   * Resolve the physical table list for datetime-based table sharding, using
   * the current batch date. Exactly one table is expected.
   */
  def getShardingDatetimeTableList(kc: KhaosContext, tableSplit: DmTableSplit, metaClazz: String, metaParamsJson: String): List[String] = {
    var tblNameList: List[String] = List[String]()
    val jobBizDate: String = kc.conf.getString(SchedulerConstants.BIZ_DATE)
    // NOTE(review): substring offsets assume jobBizDate is yyyyMMdd — confirm.
    // Also no default case: an unknown strategy value raises MatchError.
    val splitTime: String = tableSplit.getStrategyValue match {
      case TableSplitUtils.StrategyValueEnum.year => jobBizDate.substring(0, 4)
      case TableSplitUtils.StrategyValueEnum.month => jobBizDate.substring(0, 6)
      case TableSplitUtils.StrategyValueEnum.day => jobBizDate
    }
    val splitValues: scala.List[String] = scala.List[String](splitTime)
    // Look up the concrete table names matching the batch date.
    tblNameList = TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz, metaParamsJson, this, tableSplit, "=", splitValues)
    //    tblNameList = tblNameList.map(tblname => tblname.toLowerCase())
    if (tblNameList.isEmpty) {
      throw new Exception(s"读取的分表不存在=>tableName:${_mysqlConfig.table_name},分表值:${splitValues.mkString(",")}")
    }
    val tblNames: String = tblNameList.mkString(",")
    log.info(s"Sharding Datetime tables => $tblNames")
    tblNameList
  }


  /**
   * Resolve the physical table list for enum-based table sharding: the
   * strategy value is a comma-separated enum list matched with "in".
   */
  def getShardingEnumTableList(kc: KhaosContext, tableSplit: DmTableSplit, metaClazz: String, metaParamsJson: String): List[String] = {
    val splitValues: List[String] = tableSplit.getStrategyValue.split(",").toList
    // Look up the concrete table names matching the enum values.
    val tblNameList: List[String] =
      TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz, metaParamsJson, this, tableSplit, "in", splitValues)
    log.info(s"Sharding Enum tables => ${tblNameList.mkString(",")}")
    tblNameList
  }

  /**
   * Datetime-sharded read: exactly one physical table (the head of
   * tblNameList) is read, partitioned when the advanced split parameters are
   * valid, paged otherwise. A null splitMode means the advanced mode is off.
   */
  def getShardingDatetimeTablesDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tblNameList: List[String], splitMode: SplitOption): DataFrame = {
    val oneTableName: String = tblNameList.head
    if (splitMode == null) {
      log.info("高级分区功能关闭, 采用分页执行!")
      getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
    } else if (checkSplitParams(splitMode.split_field, splitMode.split_nums, extractFields)) {
      log.info("检测高级参数通过, 开启成功!")
      getPartitionSqlDF(kc, jdbcUrl, prop, oneTableName, splitMode.split_field, splitMode.split_nums.toInt)
    } else {
      log.warn("高级功能切分模式开启失败, 转为分页模式执行！")
      getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
    }
  }


  /**
   * Enum-sharded read: each physical table in tblNameList is read
   * (partitioned when the advanced split parameters are valid, paged
   * otherwise) and the results are unioned. A null splitMode means the
   * advanced mode is off.
   *
   * Fix vs the original: iterate with zipWithIndex instead of
   * tblNameList.indexOf(name). indexOf is O(n) per element and returns the
   * FIRST occurrence, so a duplicated table name would reset the accumulator
   * (index treated as 0) instead of being unioned.
   */
  def getShardingEnumTableDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tblNameList: List[String], splitMode: SplitOption): DataFrame = {
    var enumTablesDF: DataFrame = null
    tblNameList.zipWithIndex.foreach { case (oneTableName, index) =>
      val oneTableDF: DataFrame =
        if (splitMode == null) { // Paging path.
          log.warn("高级功能切分模式关闭, 进入分表枚举分支, 采用分页模式执行！")
          getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
        } else { // Advanced mode on: try the partitioned read.
          val splitField: String = splitMode.split_field
          val splitNums: String = splitMode.split_nums
          // Validate the split field/count before partitioning.
          if (checkSplitParams(splitField, splitNums, extractFields)) {
            log.info("检测高级参数通过, 开启成功!")
            getPartitionSqlDF(kc, jdbcUrl, prop, oneTableName, splitField, splitNums.toInt)
          } else {
            log.warn("高级功能切分模式开启失败，转为分页模式执行！")
            getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
          }
        }

      enumTablesDF = if (index == 0) oneTableDF else enumTablesDF.union(oneTableDF)
    }
    enumTablesDF
  }

  /**
   * Spark-native partitioned read: queries min/max of the split column and
   * hands the bounds to DataFrameReader.jdbc(column, lowerBound, upperBound,
   * numPartitions).
   *
   * @param splitField indexed/primary-key numeric column used for bounds
   * @param splitNums  number of JDBC partitions
   */
  def getPartitionSqlDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tableName: String, splitField: String, splitNums: Int): DataFrame = {
    val rangeSql: String = generateFieldBoundQuery(tableName, splitField)
    log.info(s"rangeSql => $rangeSql")
    val rangeDF: DataFrame = kc.sparkSession.read.jdbc(jdbcUrl, rangeSql, prop)
    val rangeBoundDF: Array[Row] = getLongBoundDF(rangeDF).collect

    // Bounds were cast to LongType by getLongBoundDF, so the casts below are
    // safe for non-empty tables. NOTE(review): an empty table yields NULL
    // max/min — confirm upstream guarantees rows before a partitioned read.
    val maxNum: Long = rangeBoundDF(0)(0).asInstanceOf[Long]
    val minNum: Long = rangeBoundDF(0)(1).asInstanceOf[Long]
    val oneExecuteTableDataSql: String = generateTableQuery(tableName)
    // splitNums is already an Int — the original redundant .toInt is dropped.
    kc.sparkSession.read.jdbc(jdbcUrl, oneExecuteTableDataSql, splitField, minNum, maxNum, splitNums, prop)
  }

  /**
   * Paging read: counts the table, samples rows to size the pages, builds
   * LIMIT offset,size queries via getPagingSqls and unions the page reads.
   *
   * Fix vs the original: iterate with zipWithIndex instead of
   * sqls.indexOf(sql) — indexOf returns the first occurrence, so duplicated
   * paging SQLs (possible for empty pages) would silently reset df instead of
   * being unioned.
   *
   * @param kc   job context
   * @param prop JDBC connection properties
   * @return the unioned DataFrame over all pages
   */
  def getPagingSqlDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tableName: String): DataFrame = {
    var df: DataFrame = null
    val countSql: String = getCountTableSql(tableName)
    logInfo("jdbcUrl" + jdbcUrl)

    val countDF: Array[Row] = kc.sparkSession.read.jdbc(jdbcUrl, countSql, prop).collect()
    val count: Long = countDF(0)(0).asInstanceOf[Long]
    val nonExecuteTableDataSql: String = getTableDataSql(tableName)
    val sampleDF: DataFrame = getSampleDF(kc, jdbcUrl, tableName, prop, count)
    val sqls: ArrayBuffer[String] = getPagingSqls(kc, nonExecuteTableDataSql, count, sampleDF)
    log.info(s"countSql: $countSql => $count")
    // Publish the row count for downstream bookkeeping.
    kc.conf.set(CommonConstants.DATA_COUNT_SET_NUMS, String.valueOf(count))

    for ((sql, index) <- sqls.zipWithIndex) {
      if (index == 0) {
        df = kc.sparkSession.read.jdbc(jdbcUrl, sql, prop)
      } else {
        df = df.union(kc.sparkSession.read.jdbc(jdbcUrl, sql, prop))
      }
    }
    df
  }

  /**
   * Build the wrapped count(1) subquery for the given table. When sharding is
   * off the configured `db`.`table` is used instead of the argument; the row
   * filter is appended when present.
   */
  def getCountTableSql(tblName: String): String = {
    val target: String = if (shardingSwitch) tblName else pdbAndTbl
    val countSql: String =
      if (StringUtils.isBlank(filter)) s"(select count(1) from $target) mysqlCount"
      else s"(select count(1) from $target where $filter) mysqlCount"
    log.info(s"countSql => $countSql")
    countSql
  }

  /**
   * Draw 10 random single-row samples from the table (LIMIT random,1) and
   * union them; the sample is used later to estimate page sizing.
   *
   * Fix vs the original: the random bound is clamped into [1, Int.MaxValue].
   * The original count.toInt overflowed to a negative value for tables with
   * more than Int.MaxValue rows, making Random.nextInt throw
   * IllegalArgumentException.
   */
  def getSampleDF(kc: KhaosContext, jdbcUrl: String, tableName: String, prop: Properties, count: Long): DataFrame = {
    var df: DataFrame = null
    // For count < 1 the bound is 1 (nextInt(1) == 0, as before).
    val bound: Int = math.min(math.max(count, 1L), Int.MaxValue.toLong).toInt
    for (i <- 0 to 9) {
      val randNum: Int = scala.util.Random.nextInt(bound)
      val sampleSql: String =
        if (!shardingSwitch) s"(select * from $pdbAndTbl limit $randNum,1) mysqlSample"
        else s"(select * from $tableName limit $randNum,1) mysqlSample"
      log.info(s"sampleSql => $sampleSql")
      if (i == 0) {
        df = kc.sparkSession.read.jdbc(jdbcUrl, sampleSql, prop)
      } else {
        df = df.union(kc.sparkSession.read.jdbc(jdbcUrl, sampleSql, prop))
      }
    }
    df
  }

  /**
   * Build the LIMIT-paged queries covering `count` rows of `baseSql`. The
   * segment count comes from the estimate (row-size based) or the executor
   * count, depending on splitOptions; every page but the last is
   * `count / segmentations` rows and the last absorbs the remainder.
   *
   * Fixes vs the original: a default match case (the original threw a raw
   * MatchError when a field-split fell back to paging and splitOptions was
   * still SPLIT_WITH_FIELDS); a guard against a non-positive segment count
   * (division by zero); the misleading no-op Math.ceil on integer division is
   * replaced by plain integer division (identical value); `begin` is a Long
   * to avoid Int overflow on very large tables.
   */
  def getPagingSqls(kc: KhaosContext, baseSql: String, count: Long, sampleDF: DataFrame): ArrayBuffer[String] = {
    val sqls = new ArrayBuffer[String]
    var segmentations = 1
    splitOptions match {
      case MysqlConstants.SPLIT_WITH_ESTIMATE =>
        segmentations = DataframeUtils.estimateTaskSegmentation(count, sampleDF, _jdbc_sharding_size)
      case MysqlConstants.SPLIT_WITH_CU =>
        // 20210924 企业云2.0.8: count efficiency fix for oracle2hive sync.
        segmentations = kc.sparkSession.conf.get("spark.dynamicAllocation.maxExecutors", "2").toInt
      case _ =>
        // Any other mode (e.g. a failed field-split falling back to paging):
        // keep the single-segment default instead of throwing MatchError.
        segmentations = 1
    }
    // Guard against a misconfigured executor count of 0.
    if (segmentations < 1) segmentations = 1
    logInfo("segmentations: " + segmentations)
    // Integer division: the last page picks up the remainder below.
    val partitionNum: Int = (count / segmentations).toInt
    logInfo("partitionNum: " + partitionNum)
    for (step <- 0 until segmentations) {
      val begin: Long = step.toLong * partitionNum
      val sql: String =
        if (step == segmentations - 1) s"($baseSql limit $begin, ${count - begin}) pagingSql"
        else s"($baseSql limit $begin, $partitionNum) pagingSql"
      sqls += sql
      log.info(s"Paging Sql: $step => $sql")
    }
    sqls
  }

  /**
   * Build the flat (un-wrapped) SELECT used as the base of paging queries.
   * When sharding is off the configured `db`.`table` is used instead of the
   * argument; the row filter is appended when present.
   */
  def getTableDataSql(tblName: String): String = {
    val target: String = if (shardingSwitch) tblName else pdbAndTbl
    if (StringUtils.isBlank(filter)) s"select $selectedFields from $target"
    else s"select $selectedFields from $target where $filter"
  }


  /**
   * Build the wrapped SELECT subquery (row filter + column pruning) for a
   * table. When sharding is off the configured `db`.`table` is used instead
   * of the argument.
   */
  def generateTableQuery(tblName: String): String = {
    val target: String = if (shardingSwitch) tblName else pdbAndTbl
    if (filter != null && filter.trim.nonEmpty)
      s"(select $selectedFields from $target where $filter) MySQLSourceTmp"
    else
      s"(select $selectedFields from $target) MySQLSourceTmp"
  }

  /**
   * Build the max/min bound subquery on the split field, used by the
   * partitioned read to derive lower/upper bounds. When sharding is off the
   * configured `db`.`table` is used instead of the argument.
   */
  def generateFieldBoundQuery(tblName: String, splitField: String): String = {
    val target: String = if (shardingSwitch) tblName else pdbAndTbl
    if (filter != null && filter.trim.nonEmpty)
      s"(select max(`$splitField`) as max,min(`$splitField`) as min from $target where $filter) MySQLFieldBoundTmp"
    else
      s"(select max(`$splitField`) as max,min(`$splitField`) as min from $target) MySQLFieldBoundTmp"
  }


  /** Verify read permission on the configured table; aborts the job when denied. */
  def checkReadRight(kc: KhaosContext): Unit = {
    val allowed: Boolean = MetaUtils.checkReadAuth(kc,
      _mysqlConfig.db_name,
      _mysqlConfig.table_name,
      _mysqlConfig.extender.auth.clazz,
      compact(render(_mysqlConfig.extender.auth.params)))
    if (!allowed) {
      log.error(s"mysql reader init failed, 权限验证未通过!")
      throw new Exception(s"mysql reader init failed, 权限验证未通过!")
    }
  }

  /** Connection properties shared by every JDBC read issued by this source. */
  def getJDBCProp: Properties = {
    val settings = Seq(
      "driver" -> _driver,
      "user" -> _userName,
      "password" -> _passWord,
      "fetchsize" -> _jdbc_fetch_size
    )
    val prop = new Properties
    settings.foreach { case (key, value) => prop.put(key, value) }
    prop
  }

  /** Parse the module JSON config and cache every derived field on this instance. */
  def initMysqlConf(config: String): Unit = {
    implicit val formats: DefaultFormats.type = DefaultFormats

    _mysqlConfig = parse(config, useBigDecimalForDouble = true).extract[MySQLConfig]
    dbName = _mysqlConfig.db_name
    tblName = _mysqlConfig.table_name
    dbAndTbl = s"$dbName.$tblName"
    // Backtick-quoted form used in generated SQL.
    pdbAndTbl = s"`$dbName`.`$tblName`"
    filter = _mysqlConfig.filter
    shardingSwitch = _mysqlConfig.shardingSwitch
    shardingSourceList = _mysqlConfig.shardingSourceList
    shardingDb = _mysqlConfig.shardingDb
    shardingDbKey = _mysqlConfig.shardingDbKey
    shardingDbValue = _mysqlConfig.shardingDbValue
    shardingDbType = _mysqlConfig.shardingDbType
    shardingTable = _mysqlConfig.shardingTable
    shardingTableKey = _mysqlConfig.shardingTableKey
    shardingTableValue = _mysqlConfig.shardingTableValue
    shardingTableType = _mysqlConfig.shardingTableType
    advancedOptions = _mysqlConfig.advanced_options
    extractFields = _mysqlConfig.extract_fields
    // Fall back to an empty SplitOption so later accesses never hit None.
    _splite_mode = _mysqlConfig.advanced_options.split_mode.getOrElse(SplitOption())
  }

  /** Fetch table metadata and cache the MySQL host/port/credentials on this instance. */
  def initMysqlConnect(kc: KhaosContext): Unit = {
    _mysqlMeta = MetaUtils.getMysqlMeta(kc,
      _mysqlConfig.db_name,
      _mysqlConfig.table_name,
      _mysqlConfig.extender.meta.clazz,
      compact(render(_mysqlConfig.extender.meta.params)),
      this)

    // Copy the connection coordinates out of the metadata entity.
    val connect: MysqlConnect = _mysqlMeta.dsMysqlConnect
    _ip = connect.getHost
    _port = connect.getPort
    _userName = connect.getUserName
    _passWord = connect.getPassWord
  }

  /**
   * Resolve metadata for one sharding data source, update the cached
   * connection fields, and return its JDBC url. Note this overwrites
   * _ip/_port/_userName/_passWord, so getJDBCProp must be called right after
   * each invocation (as initShardingDbAndTb does).
   */
  def initShardingMysqlUrl(kc: KhaosContext, dsName: String, dsId: String, dataType: String): String = {
    _mysqlMeta = MetaUtils.getMysqlShardingMeta(kc,
      dsName,
      dsId,
      dataType,
      _mysqlConfig.extender.meta.clazz,
      this
    )

    val connect: MysqlConnect = _mysqlMeta.dsMysqlConnect
    _ip = connect.getHost
    _port = connect.getPort
    _userName = connect.getUserName
    _passWord = connect.getPassWord
    // NOTE(review): unlike read() (L97: "...${_port}${_jdbc_url_param}"), this
    // url inserts a "/" before the url parameters — confirm which form the
    // sharding sources actually expect; one of the two is likely wrong.
    _jdbcUrl = s"jdbc:mysql://${_ip}:${_port}/${_jdbc_url_param}"
    _jdbcUrl
  }

  /**
   * Resolve the physical `db`.`table` names matched by the sharding patterns
   * on the data source at `index`, by querying information_schema.tables.
   * Mutates the shardingDb/shardingTable members ("any" mode rewrites the
   * pattern to a SQL LIKE form), so callers reset them before each call.
   */
  def getCartesianProd(kc: KhaosContext, index: Int): ListBuffer[String] = {
    var fuzzyMatchSql = ""
    var cartesianProd = new ListBuffer[String]
    var shardingDbArr = new ArrayBuffer[String]()
    var shardingTbArr = new ArrayBuffer[String]()

    // 企业云2.4.1: pattern matching modes — "any" means any characters
    // (LIKE %), "regex" uses the pattern as-is with MySQL REGEXP.
    logInfo("shardingDb===>" + shardingDb + "        shardingTable===>" + shardingTable)
    logInfo("shardingDbValue===>" + shardingDbValue + "        shardingTableValue===>" + shardingTableValue)
    // NOTE(review): the patterns are interpolated directly into SQL. Values
    // come from job configuration, not end users, but confirm they are
    // trusted — this is string-built SQL.
    if (shardingDbType.equals("any") && shardingTableType.equals("any")) {
      shardingDbArr = getConcatShardingStr(shardingDb, shardingDbValue)
      shardingTbArr = getConcatShardingStr(shardingTable, shardingTableValue)
      shardingDb = analysisAnyCharacters(shardingDb)
      shardingTable = analysisAnyCharacters(shardingTable)
      fuzzyMatchSql = s"(SELECT TABLE_SCHEMA,TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA like '$shardingDb' AND TABLE_NAME like '$shardingTable') fuzzyMatchSqlTmp"
    } else if (shardingDbType.equals("any") && shardingTableType.equals("regex")) {
      shardingDbArr = getConcatShardingStr(shardingDb, shardingDbValue)
      shardingDb = analysisAnyCharacters(shardingDb)
      fuzzyMatchSql = s"(SELECT TABLE_SCHEMA,TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA like '$shardingDb' AND TABLE_NAME regexp '$shardingTable') fuzzyMatchSqlTmp"
    } else if (shardingDbType.equals("regex") && shardingTableType.equals("any")) {
      shardingTbArr = getConcatShardingStr(shardingTable, shardingTableValue)
      shardingTable = analysisAnyCharacters(shardingTable)
      fuzzyMatchSql = s"(SELECT TABLE_SCHEMA,TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA regexp '$shardingDb' AND TABLE_NAME like '$shardingTable') fuzzyMatchSqlTmp"
    } else if (shardingDbType.equals("regex") && shardingTableType.equals("regex")) {
      fuzzyMatchSql = s"(SELECT TABLE_SCHEMA,TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA regexp '$shardingDb' AND TABLE_NAME regexp '$shardingTable') fuzzyMatchSqlTmp"
    }
    logInfo("shardingDbArr===>" + shardingDbArr + "        shardingTbArr===>" + shardingTbArr)
    logInfo("shardingDb===>" + shardingDb + "        shardingTable===>" + shardingTable)
    logInfo("fuzzyMatchSql===>" + fuzzyMatchSql)
    logInfo(s"shardingJDBCUrlArr($index)===>" + shardingJDBCUrlArr(index) + s"     shardingJDBCPropArr($index)===>" + shardingJDBCPropArr(index))

    val rows: Array[Row] = kc.sparkSession.read.jdbc(shardingJDBCUrlArr(index), fuzzyMatchSql, shardingJDBCPropArr(index)).collect()
    val tbs: Array[String] = rows.map((row: Row) => {
      val matchDb: String = row.getAs[String](0)
      val matchTbl: String = row.getAs[String](1)
      s"`$matchDb`.`$matchTbl`"
    })
    tbs.foreach(cartesianProd.append(_: String))
    log.info("===>cartesianProdbefore")
    cartesianProd.foreach(log.info)
    // The db/table arrays are used below to cross-check which combinations
    // actually exist; they may contain duplicates, hence the distinct later.
    if (shardingDbArr.size == 0) {
      // map used for side effects only (appends); result discarded.
      rows.map((row: Row) => {
        shardingDbArr.append(row.getAs[String](0))
      })
    }
    if (shardingTbArr.size == 0) {
      rows.map((row: Row) => {
        shardingTbArr.append(row.getAs[String](1))
      })
    }
    logInfo("shardingDbArr===>" + shardingDbArr + "        shardingTbArr===>" + shardingTbArr)

    // Keep only db x table combinations that were actually matched.
    val tmpProd = new ListBuffer[String]()
    for (db <- shardingDbArr.distinct) {
      for (table <- shardingTbArr.distinct) {
        // MySQL comparison here is case-insensitive; names are lowercased by
        // the server layer.
        if (cartesianProd.contains(s"`$db`.`$table`"))
          tmpProd.append(s"`$db`.`$table`")
      }
    }
    cartesianProd = tmpProd


    log.info("===>cartesianProd")
    cartesianProd.foreach(log.info)
    cartesianProd
  }

  /**
   * Expand a `prefix#variable#suffix` sharding key against its comma-separated
   * value list: each value replaces the middle segment.
   * NOTE(review): assumes shardingKey contains at least two '#' separators —
   * confirm callers validate the pattern first.
   */
  def getConcatShardingStr(shardingKey: String, shardingValue: String): ArrayBuffer[String] = {
    val segments: Array[String] = shardingKey.split("#", -1)
    val expanded = new ArrayBuffer[String]()
    shardingValue.split(",", -1).foreach { value =>
      expanded.append(segments(0) + value + segments(2))
    }
    expanded
  }

  // "any" wildcard mode: rewrite `prefix#variable#suffix` to `prefix%suffix`
  // for use in a SQL LIKE pattern; rejects names not following the convention.
  def analysisAnyCharacters(shardingStr: String): String = {
    if (!shardingStr.matches(regexKey)) {
      log.error(s"分库分表名未按 前缀#变量#后缀 规则填写，无法匹配相应库表名")
      throw new Exception(s"分库分表名未按 前缀#变量#后缀 规则填写，无法匹配相应库表名")
    }
    val parts: Array[String] = shardingStr.split("#", -1)
    parts(0).concat("%").concat(parts(2))
  }


  /**
   * Build the projection list for the source query. MySQL TIME columns are
   * rendered via time_format(..., '%T') so they arrive as strings.
   *
   * Idiom fix: map + mkString replaces the original synchronized
   * StringBuffer plus trailing-comma dropRight(1) juggling; an empty field
   * list still yields "" as before.
   */
  def getSelectFields: String = {
    extractFields.map { ef =>
      if (ef.data_type.equalsIgnoreCase("TIME")) s"time_format(`${ef.field}`, '%T') as `${ef.field}`"
      else s"`${ef.field}`"
    }.mkString(",")
  }

  /**
   * Load `module.mysql.source.*` settings from the job configuration into the
   * JDBC-related members, with the documented defaults.
   */
  def loadProperties(kc: KhaosContext): Unit = {
    val props: Map[String, String] = kc.conf.getAllWithPrefix("module.mysql.source.").toMap
    log.info("MysqlSource Properties")
    props.foreach { case (key, value) => log.info(key + "   " + value) }
    _jdbc_url_param = props.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_URL_PARAM, "")
    _jdbc_fetch_size = props.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_FETCHSIZE, "1000")
    _jdbc_sharding_size = props.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_PAGING_SIZE, "200").toInt
    // com.mysql.cj.jdbc.Driver default keeps MySQL 8 support.
    _driver = props.getOrElse(MysqlConstants.MODULE_MYSQL_SOURCE_JDBC_DRIVER, "com.mysql.cj.jdbc.Driver")
  }

  /**
   * Validate the advanced split settings: the split field must be a NUMBER
   * column of the source table and the split count an integer in [1, 1000].
   *
   * @param splitField    split column name
   * @param splitNums     split count (string form)
   * @param extractFields source column definitions
   * @return true when the partitioned read may be used
   */
  def checkSplitParams(splitField: String, splitNums: String, extractFields: List[ExtractFieldInfo]): Boolean = {
    var result = false
    if (!StringUtils.isBlank(splitField) && !StringUtils.isBlank(splitNums)) {
      try {
        for (ef <- extractFields) {
          // NOTE(review): case-sensitive equals("NUMBER") here vs
          // equalsIgnoreCase("TIME") elsewhere — confirm intended.
          if (splitField.equals(ef.field) && ef.data_type.equals("NUMBER")) {
            if (1 <= splitNums.toInt && splitNums.toInt <= 1000) {
              logInfo(s"切分参数校验通过！切分数量为：$splitNums，切分字段为：$splitField")
              result = true
            } else {
              log.error(s"切分数量输入有误，目前只支持1到1000的整数！实际输入数量为：$splitNums")
            }
          }
        }
        if (!result) {
          log.error(s"切分字段输入有误，目前只支持整型字段！实际输入字段为：$splitField")
        }
      } catch {
        // Deliberate best-effort: a malformed number only logs and returns
        // false, which makes the caller fall back to the paging read.
        case e: Exception => {
          log.error(s"切分字段或切分数量输入有误，切分字段目前只支持整型字段，切分数量只支持1到1000的整数！实际输入数量为：$splitNums，实际输入字段为：$splitField")
        }
      }
    }
    result
  }

  /**
   * Cast the bound columns to LongType so the min/max can be fed to the
   * partitioned JDBC read. MySQL bound columns arrive as IntegerType,
   * LongType or DecimalType(20,0); Oracle as DecimalType(38,10).
   *
   * Fixes vs the original: the condition
   * `!LongType || DecimalType(38,10)` was redundant — Decimal(38,10) is never
   * LongType, so it reduces to "not already Long"; the dead
   * `val schema: Unit = ...` assignment is removed.
   *
   * @param df bound DataFrame with columns of assorted numeric types
   * @return the same DataFrame with every non-Long column cast to LongType
   */
  def getLongBoundDF(df: DataFrame): DataFrame = {
    // Columns that still need a cast.
    val colsToCast: Seq[String] = df.schema.collect {
      case field if field.dataType != LongType => field.name
    }
    // Apply the casts one column at a time.
    colsToCast.foldLeft(df) { (acc, name) =>
      acc.withColumn(name, col(name).cast(LongType))
    }
  }

  /**
   * Derive the module's output schema from the configured extract fields,
   * one KhaosStructField per column in declaration order.
   */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    val mySQLInfo: MySQLConfig = parse(config, useBigDecimalForDouble = true).extract[MySQLConfig]
    mySQLInfo.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
  }
}
