package com.kingsoft.dc.khaos.module.spark.source

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.extender.meta.model.ds.SqlServerConnect
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MetaDataConstants, MysqlConstants, SQLServerConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source._
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.{DataframeUtils, MetaUtils, TableSplitUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import java.util.Properties
import scala.collection.mutable.{ArrayBuffer, ListBuffer}

/**
 * Created by fanshengli on 2021/11/09 10:00.
 */
class SqlServerSource extends SourceStrategy with Logging {

  // Table metadata (including connection info) resolved from the metadata service.
  private var _sqlserverMeta: MetaDataEntity = _
  // Parsed module configuration (JSON -> SQLServerConfig).
  private var _sqlserverConfig: SQLServerConfig = _

  // Spark datasource identifier of the Microsoft SQLServer Spark connector.
  private val _format = "com.microsoft.sqlserver.jdbc.spark"
  //private val _driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
  // Connection details, filled in by initSQLServerConnect().
  private var _ip = ""
  private var _port = ""
  private var _userName = ""
  private var _passWord = ""
  private var _jdbcUrl = ""

  // Extra JDBC url parameters appended verbatim to the url (module config).
  private var _jdbc_url_param = ""
  // JDBC fetch size handed to the driver via connection properties.
  private var _jdbc_fetch_size = "1000"
  // Data volume (MB) per slice when estimating page segmentation (default 200MB).
  private var _jdbc_sharding_size = 200

  // Quoted database/table identifiers and the optional row filter,
  // filled in by initSQLServerConf() / initSQLServerConnect().
  private var instanceName = ""
  private var dbName = ""
  private var tblName = ""
  private var filter = ""
  // NOTE(review): regexKey appears unused in this file — confirm before removing.
  private var regexKey = ".*#.*#.*"
  private var advancedOptions: RdbmsAdvancedOption = _
  private var extractFields: List[ExtractFieldInfo] = _
  private var splitOptions: String = ""


  // SELECT column list (TIME columns converted to strings), built per run.
  private var selectedFields = ""
  // Full table query with the filter applied, built per run (logged only).
  private var executeTableDataSql = ""
  // Split mode from advanced options (defaults to SplitOption()).
  private var _splite_mode: SplitOption = _

  /**
   * Entry point: read the configured SQLServer table into a DataFrame.
   *
   * Chooses the read strategy from the advanced split options: field-based
   * partitioning, size-estimate paging, or resource-count paging when the
   * advanced switch is off (or the option is unrecognised).
   *
   * @param kc         job context (config, SparkSession)
   * @param module_id  id of this module instance (unused here)
   * @param config     module config as a JSON string
   * @param dependence upstream dependency descriptor (unused here)
   * @return the table contents as a DataFrame
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {

    // Parse config, load properties, verify read permission, resolve connection.
    init(kc, config)

    // Column list for the SELECT; TIME columns are converted to hh:mm:ss strings.
    selectedFields = getSelectFields

    // Full table query with row filtering and column pruning applied.
    executeTableDataSql = generateTableQuery(tblName)

    _jdbcUrl = s"jdbc:sqlserver://${_ip}:${_port};databaseName=$instanceName${_jdbc_url_param}"
    log.info(s"jdbc => ${_jdbcUrl}, executeTableDataSql => $executeTableDataSql")
    val prop: Properties = getJDBCProp

    splitOptions = _splite_mode.split_options.getOrElse("")
    // Rendered once instead of per-branch as before.
    val metaClazz: String = _sqlserverConfig.extender.meta.clazz
    val metaParams: String = compact(render(_sqlserverConfig.extender.meta.params))

    if (advancedOptions.split_mode.nonEmpty && _splite_mode.on_off) {
      log.info("切分设置打开")
      splitOptions match {
        case SQLServerConstants.SPLIT_WITH_FIELDS =>
          log.info("按字段切分")
          getSplitWithFieldDF(kc, metaClazz, metaParams, _jdbcUrl, prop, _splite_mode, extractFields)
        case SQLServerConstants.SPLIT_WITH_ESTIMATE =>
          log.info("按估值切分")
          getSplitWithEstimateDF(kc, metaClazz, metaParams, _jdbcUrl, prop)
        case other =>
          // BUG FIX: the match previously had no default case, so an
          // unrecognised split option threw a runtime MatchError. Fall back to
          // resource-count splitting instead.
          log.warn(s"Unknown split option '$other', falling back to resource-count split")
          getSplitWithCuDF(kc, metaClazz, metaParams, _jdbcUrl, prop)
      }
    } else {
      log.info("切分设置关闭")
      log.info("按照资源数切分")
      getSplitWithCuDF(kc, metaClazz, metaParams, _jdbcUrl, prop)
    }
  }


  /**
   * Initialise all per-run state: parse the module config, load module
   * properties, verify read permission and resolve the connection metadata.
   * Call order matters: the config must be parsed first, because the auth
   * check and connection lookup both read `_sqlserverConfig`.
   */
  def init(kc: KhaosContext, config: String): Unit = {
    // (An unused `implicit val formats` was removed here; each callee that
    // needs json4s extraction declares its own Formats.)
    initSQLServerConf(config)

    loadProperties(kc)

    checkReadRight(kc)

    initSQLServerConnect(kc)
  }


  /**
   * Advanced field-based split read: partition the JDBC read on a numeric
   * column when the split parameters validate, otherwise fall back to paging.
   *
   * @param splitMode     advanced split options (field + partition count)
   * @param extractFields source field metadata used to validate the split column
   * @return the table contents as a DataFrame
   */
  def getSplitWithFieldDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties, splitMode: SplitOption, extractFields: List[ExtractFieldInfo]): DataFrame = {
    // NOTE(review): table-split lookup is hard-coded to null, so the sharding
    // branches below are currently unreachable — confirm before re-enabling.
    val tableSplit: DmTableSplit = null //TableSplitUtils.getTableSplit(kc, dbName, tblName, metaClazz, metaParamsJson)
    if (tableSplit == null) { // not sharded
      val splitField: String = "\"" + splitMode.split_field + "\""
      val splitNums: String = splitMode.split_nums
      if (checkSplitParams(splitField, splitNums, extractFields)) {
        log.info("高级功能切分模式开启成功!")
        getPartitionSqlDF(kc, jdbcUrl, prop, tblName, splitField, splitNums.toInt)
      } else {
        log.warn("高级功能切分模式开启失败，转为分页模式执行！")
        getPagingSqlDF(kc, jdbcUrl, prop, tblName)
      }
    } else { // sharded — must NOT use the member tblName in this branch!
      tableSplit.getStrategyType match {
        // enum-based sharding
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM =>
          val tblNameList: List[String] = getShardingEnumTableList(kc, tableSplit, metaClazz, metaParamsJson)
          getShardingEnumTableDF(kc, jdbcUrl, prop, tblNameList, splitMode)
        // date-based sharding: read the physical table for the current batch date
        case TableSplitUtils.StrategyTypeEnum.DATETIME =>
          val tblNameList: List[String] = getShardingDatetimeTableList(kc, tableSplit, metaClazz, metaParamsJson)
          getShardingDatetimeTablesDF(kc, jdbcUrl, prop, tblNameList, splitMode)
      }
    }
  }

  /**
   * Read the table without field-based splitting (resource-count paging),
   * with a currently-disabled branch for sharded physical tables.
   *
   * @param kc             job context
   * @param metaClazz      metadata extender class name
   * @param metaParamsJson metadata extender params as a JSON string
   * @param jdbcUrl        full SQLServer JDBC url
   * @param prop           JDBC connection properties (user/password/fetchsize)
   * @return the table contents as a DataFrame
   */
  def getAdvancedOffDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    var df: DataFrame = null
    // NOTE(review): table-split lookup is hard-coded to null, so the sharding
    // branch below is currently unreachable — confirm before re-enabling.
    val tableSplit: DmTableSplit = null; //TableSplitUtils.getTableSplit(kc, dbName, tblName, metaClazz, metaParamsJson)
    // Not sharded: plain paging read of the single table.
    if (tableSplit == null) {
      df = getPagingSqlDF(kc, jdbcUrl, prop, tblName)
    } else { // sharded physical tables
      tableSplit.getStrategyType match {
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM => { // enum-based sharding
          val tblNameList: List[String] = getShardingEnumTableList(kc, tableSplit, metaClazz, metaParamsJson)
          df = getShardingEnumTableDF(kc, jdbcUrl, prop, tblNameList, null)
        }
        // date-based sharding
        case TableSplitUtils.StrategyTypeEnum.DATETIME => {
          val tblNameList: List[String] = getShardingDatetimeTableList(kc, tableSplit, metaClazz, metaParamsJson)
          df = getShardingDatetimeTablesDF(kc, jdbcUrl, prop, tblNameList, null)
        }
      }
    }
    df
  }

  /** Size-estimate split: currently delegates to the non-advanced paging path. */
  def getSplitWithEstimateDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame =
    getAdvancedOffDF(kc, metaClazz, metaParamsJson, jdbcUrl, prop)

  /** Resource-count split: force the CU split option, then run the paging path. */
  def getSplitWithCuDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    // getPagingSqls reads this member to size its segments.
    splitOptions = SQLServerConstants.SPLIT_WITH_CU
    getAdvancedOffDF(kc, metaClazz, metaParamsJson, jdbcUrl, prop)
  }


  //  def getShardingSplitWithCuDF(kc: KhaosContext, data: DataFrame): DataFrame = {
  //    splitOptions = MysqlConstants.SPLIT_WITH_CU
  //    getShardingPagingDF(kc, data)
  //  }
  //
  //  def getShardingSplitWithEstimateDF(kc: KhaosContext, data: DataFrame): DataFrame = {
  //    getShardingPagingDF(kc, data)
  //  }
  //
  //  def getShardingSplitWithFieldDF(kc: KhaosContext, data: DataFrame): DataFrame = {
  //    var resultData: DataFrame = data
  //    val splitMode: SplitOption = advancedOptions.split_mode.get
  //    val splitField: String = splitMode.split_field
  //    val splitNums: String = splitMode.split_nums
  //    //检验切分参数
  //    val splitParamsCheck: Boolean = checkSplitParams(splitField, splitNums, extractFields)
  //    if (splitParamsCheck) {
  //      log.info("高级功能切分模式开启成功!")
  //      resultData = getShardingPartitionDF(kc, resultData, splitField, splitNums)
  //    } else {
  //      log.warn("高级功能切分模式开启失败，估算/分页查询！")
  //      resultData = getShardingSplitWithCuDF(kc, resultData)
  //    }
  //    resultData
  //  }

  /**
   * Resolve the physical date-sharded table(s) for the current batch date.
   *
   * @return non-empty list of physical table names
   * @throws Exception when no physical table matches the batch date
   */
  def getShardingDatetimeTableList(kc: KhaosContext, tableSplit: DmTableSplit, metaClazz: String, metaParamsJson: String): List[String] = {
    val jobBizDate: String = kc.conf.getString(SchedulerConstants.BIZ_DATE)
    // Derive the split key from the batch date: yyyy / yyyyMM / yyyyMMdd.
    val splitTime: String = tableSplit.getStrategyValue match {
      case TableSplitUtils.StrategyValueEnum.year => jobBizDate.substring(0, 4)
      case TableSplitUtils.StrategyValueEnum.month => jobBizDate.substring(0, 6)
      case TableSplitUtils.StrategyValueEnum.day => jobBizDate
    }
    val splitValues: List[String] = List(splitTime)
    // Resolve the physical table names from the metadata service.
    val tblNameList: List[String] =
      TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz, metaParamsJson, this, tableSplit, "=", splitValues)
    if (tblNameList.isEmpty) {
      throw new Exception(s"读取的分表不存在=>tableName:${_sqlserverConfig.table_name},分表值:${splitValues.mkString(",")}")
    }
    log.info(s"Sharding Datetime tables => ${tblNameList.mkString(",")}")
    tblNameList
  }


  /**
   * Resolve the physical enum-sharded table names: the strategy value is a
   * comma-separated list of enum keys matched with an `in` lookup.
   *
   * @return list of physical table names (may be empty)
   */
  def getShardingEnumTableList(kc: KhaosContext, tableSplit: DmTableSplit, metaClazz: String, metaParamsJson: String): List[String] = {
    val splitValues: List[String] = tableSplit.getStrategyValue.split(",").toList
    // Resolve the physical table names from the metadata service.
    val tblNameList: List[String] =
      TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz, metaParamsJson, this, tableSplit, "in", splitValues)
    log.info(s"Sharding Enum tables => ${tblNameList.mkString(",")}")
    tblNameList
  }

  /**
   * Read the single date-sharded physical table for the current batch.
   *
   * @param tblNameList resolved physical tables; the datetime strategy yields
   *                    exactly one per batch, so only the head is read
   * @param splitMode   advanced split options; null disables field partitioning
   * @return DataFrame over the matching physical table
   */
  def getShardingDatetimeTablesDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tblNameList: List[String], splitMode: SplitOption): DataFrame = {
    val oneTableName: String = tblNameList.head
    if (splitMode == null) {
      log.info("高级分区功能关闭, 采用分页执行!")
      getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
    } else {
      val splitField: String = splitMode.split_field
      val splitNums: String = splitMode.split_nums
      // Validate the split settings; fall back to paging on failure.
      if (checkSplitParams(splitField, splitNums, extractFields)) {
        log.info("检测高级参数通过, 开启成功!")
        getPartitionSqlDF(kc, jdbcUrl, prop, oneTableName, splitField, splitNums.toInt)
      } else {
        log.warn("高级功能切分模式开启失败, 转为分页模式执行！")
        getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
      }
    }
  }


  /**
   * Read every enum-sharded physical table and UNION the results.
   *
   * @param tblNameList physical table names to read
   * @param splitMode   advanced split options; null forces paging
   * @return union of all shard reads, or null when the list is empty
   *         (preserves the historical contract)
   */
  def getShardingEnumTableDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tblNameList: List[String], splitMode: SplitOption): DataFrame = {
    val shardDFs: List[DataFrame] = tblNameList.map { oneTableName =>
      if (splitMode == null) { // paging
        log.warn("高级功能切分模式关闭, 进入分表枚举分支, 采用分页模式执行！")
        getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
      } else { // advanced options on: field-partitioned read
        val splitField: String = splitMode.split_field
        val splitNums: String = splitMode.split_nums
        if (checkSplitParams(splitField, splitNums, extractFields)) {
          log.info("检测高级参数通过, 开启成功!")
          getPartitionSqlDF(kc, jdbcUrl, prop, oneTableName, splitField, splitNums.toInt)
        } else {
          log.warn("高级功能切分模式开启失败，转为分页模式执行！")
          getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
        }
      }
    }
    // BUG FIX: the previous implementation found each table's position with
    // tblNameList.indexOf(name) inside the loop (O(n^2)); a duplicated table
    // name resolved to index 0 and RESET the accumulator, silently dropping
    // the shards already unioned.
    if (shardDFs.isEmpty) null else shardDFs.reduceLeft(_ union _)
  }

  /**
   * Spark's partitioned JDBC read, driven by the min/max bounds of an indexed
   * or primary-key column.
   *
   * @param tableName  quoted physical table to read
   * @param splitField quoted column used to range-partition the read
   * @param splitNums  number of partitions
   */
  def getPartitionSqlDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tableName: String, splitField: String, splitNums: Int): DataFrame = {
    val rangeSql: String = generateFieldBoundQuery(tableName, splitField)
    log.info(s"rangeSql => $rangeSql")
    //val rangeDF: DataFrame = kc.sparkSession.read.jdbc(jdbcUrl, rangeSql, prop)
    // NOTE(review): DataFrameReader.jdbc(...) ignores a preceding .format(...)
    // call, so the Microsoft connector `_format` is likely not in effect here —
    // confirm whether .format(...).options(...).load() was intended.
    val rangeDF: DataFrame = kc.sparkSession.read.format(_format).jdbc(jdbcUrl, rangeSql, prop)
    val rangeBoundDF: Array[Row] = getLongBoundDF(rangeDF).collect

    // NOTE(review): on an empty table max/min come back NULL and these casts
    // will NPE — verify upstream guarantees at least one row.
    val maxNum: Long = rangeBoundDF(0)(0).asInstanceOf[Long]
    val minNum: Long = rangeBoundDF(0)(1).asInstanceOf[Long]
    val oneExecuteTableDataSql: String = generateTableQuery(tableName)
    //kc.sparkSession.read.jdbc(jdbcUrl, oneExecuteTableDataSql, splitField, minNum, maxNum, splitNums.toInt, prop)
    kc.sparkSession.read.format(_format).jdbc(jdbcUrl, oneExecuteTableDataSql, splitField, minNum, maxNum, splitNums, prop)
  }

  /**
   * Paging read: count the table, sample rows to size the pages, build one
   * ROW_NUMBER()-windowed SQL per page and union the per-page DataFrames.
   *
   * @param kc        job context
   * @param jdbcUrl   full SQLServer JDBC url
   * @param prop      JDBC connection properties
   * @param tableName quoted physical table to read
   * @return union of all page reads
   */
  def getPagingSqlDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tableName: String): DataFrame = {
    var df: DataFrame = null
    val countSql: String = getCountTableSql(tableName)
    //val countDF: Array[Row] = kc.sparkSession.read.jdbc(jdbcUrl, countSql, prop).collect()
    val countDF: Array[Row] = kc.sparkSession.read.format(_format).jdbc(jdbcUrl, countSql, prop).collect()
    // NOTE(review): count(1) is read back as Integer, which overflows for
    // tables beyond Int.MaxValue rows — confirm the driver's COUNT mapping.
    val cnt: Integer = countDF(0)(0).asInstanceOf[Integer]
    val count: Long = cnt.toLong
    log.info(s"countSql总个数： $countSql => $count")

    // NOTE(review): extractFields(0) assumes at least one extracted field; an
    // empty field list would throw here.
    val sampleDF: DataFrame = getSampleDF(kc, jdbcUrl, tableName, prop, count, "\"" + extractFields(0).field + "\"")
    //val nonExecuteTableDataSql: String = getTableDataSql(tableName)
    //select top 10 * from dbo.student where stu_name not in (select top 0 stu_name from dbo.student)
    val sqls: ArrayBuffer[String] = getPagingSqls(kc, tableName, count, sampleDF, "\"" + extractFields(0).field + "\"")

    // Publish the row count so downstream modules can report metrics.
    kc.conf.set(CommonConstants.DATA_COUNT_SET_NUMS, String.valueOf(count))

    // Union one JDBC read per paging SQL (page SQLs are unique, so indexOf is safe here).
    for (sql <- sqls) {
      val index: Int = sqls.indexOf(sql)
      if (index == 0) {
        df = kc.sparkSession.read.format(_format).jdbc(jdbcUrl, sql, prop)
      } else {
        df = df.union(kc.sparkSession.read.format(_format).jdbc(jdbcUrl, sql, prop))
      }
    }
    df
  }

  /** COUNT(1) query (with the optional row filter) wrapped as a derived table. */
  def getCountTableSql(tblName: String): String = {
    val whereClause = if (StringUtils.isBlank(filter)) "" else s" where $filter"
    val countSql = s"(select count(1) AS cnt from ${dbName}.$tblName$whereClause) sqlserverCount"
    log.info(s"countSql => $countSql")
    countSql
  }

  /**
   * Build a small sample DataFrame (ten `top 10` reads, unioned) that is later
   * used to estimate the average row size for paging segmentation.
   *
   * @param kc        job context
   * @param jdbcUrl   full SQLServer JDBC url
   * @param tableName quoted physical table to sample
   * @param prop      JDBC connection properties
   * @param count     total row count of the table
   * @param fieldName quoted field name (currently unused — see note below)
   * @return union of the sampled rows
   */
  def getSampleDF(kc: KhaosContext, jdbcUrl: String, tableName: String, prop: Properties, count: Long, fieldName: String): DataFrame = {
    var df: DataFrame = null
    var sampleSql = ""
    var randNum = 0
    for (i <- 0 to 9) {
      // NOTE(review): randNum is computed but no longer used — it fed the
      // commented-out random-offset sample below; remove or restore it.
      if (count.toInt < 1) {
        randNum = scala.util.Random.nextInt(1)
      } else {
        randNum = scala.util.Random.nextInt(count.toInt)
      }
      // NOTE(review): the same `top 10` query runs on every iteration, so the
      // result is 10 copies of the same rows — presumably acceptable for size
      // estimation; confirm.
      sampleSql = s"(select top 10 * from ${dbName}.$tableName) sqlserverSample"
      //      sampleSql = s"(select top 1 * from ${dbName}.$tableName where $fieldName not in (select top $randNum $fieldName from ${dbName}.$tableName)) sqlserverSample"
      log.info(s"sampleSql => $sampleSql")
      if (i == 0) {
        //df = kc.sparkSession.read.jdbc(jdbcUrl, sampleSql, prop)
        df = kc.sparkSession.read.format(_format).jdbc(jdbcUrl, sampleSql, prop)
      } else {
        //df = df.union(kc.sparkSession.read.jdbc(jdbcUrl, sampleSql, prop))
        df = df.union(kc.sparkSession.read.format(_format).jdbc(jdbcUrl, sampleSql, prop))
      }
    }
    df
  }

  /**
   * Build the per-page SQL statements. The segment count comes either from a
   * size estimate (~_jdbc_sharding_size MB per slice) or from the executor
   * count; each page selects a di_row_id range of the windowed query.
   *
   * @param tableName quoted physical table to page through
   * @param count     total row count of the (filtered) table
   * @param sampleDF  sample rows used to estimate average row size
   * @param fieldName quoted order-by field (NOTE: extractFields(0) is used
   *                  instead below — historical behavior, kept as-is)
   * @return one paging SQL string per segment
   */
  def getPagingSqls(kc: KhaosContext, tableName: String, count: Long, sampleDF: DataFrame, fieldName: String): ArrayBuffer[String] = {
    val sqls = new ArrayBuffer[String]
    val segmentations: Int = splitOptions match {
      case SQLServerConstants.SPLIT_WITH_ESTIMATE =>
        DataframeUtils.estimateTaskSegmentation(count, sampleDF, _jdbc_sharding_size)
      case SQLServerConstants.SPLIT_WITH_CU =>
        //20210924企业云2.0.8修复oracle2hive同步count效率问题
        kc.sparkSession.conf.get("spark.dynamicAllocation.maxExecutors", "2").toInt
      case _ =>
        // BUG FIX: the match previously had no default case, so reaching this
        // method with another option set (e.g. the field-split -> paging
        // fallback path) threw a MatchError. Keep the historical initial value.
        1
    }
    logInfo("分区数量segmentations: " + segmentations)
    // Rows per page = ceil(count / segmentations). The old Math.ceil wrappers
    // around integer arithmetic were no-ops; the remainder check rounds up.
    var partitionNum: Int = (count / segmentations).toInt
    if (count % segmentations > 0) partitionNum = partitionNum + 1

    logInfo("每个分区（即每页查询条数）partitionNum: " + partitionNum)
    for (step <- 0 until segmentations) {
      val begin: Int = step * partitionNum
      val tempSql = getTableDataSql(tableName, "\"" + extractFields(0).field + "\"", begin, partitionNum)
      // The last slice is capped by the exact row count.
      val sql =
        if (step == segmentations - 1)
          s"(select $selectedFields from ($tempSql) as r where r.di_row_id>$begin and r.di_row_id<=$count) pagingSql"
        else
          s"(select $selectedFields from ($tempSql) as r where r.di_row_id>$begin and r.di_row_id<=$begin+$partitionNum) pagingSql"
      sqls += sql
      log.info(s"Paging Sql: $step => $sql")
    }
    sqls
  }

  /** Plain projection query over the table, with the optional row filter. */
  def getTableDataSql(tblName: String): String = {
    val base = s"select $selectedFields from ${dbName}.$tblName"
    if (StringUtils.isBlank(filter)) base else s"$base where $filter"
  }

  /**
   * ROW_NUMBER()-windowed projection used by the paging reads.
   *
   * @param tblName      quoted physical table name
   * @param fieldName    quoted column driving the ROW_NUMBER() ordering
   * @param begin        page start offset (currently unused here; the caller
   *                     applies the di_row_id range itself)
   * @param partitionNum page size (currently unused here, see above)
   * @return the windowed SELECT, with the optional row filter appended
   */
  def getTableDataSql(tblName: String, fieldName: String, begin: Integer, partitionNum: Int): String = {
    val windowed = s"select $selectedFields, ROW_NUMBER() OVER(ORDER BY $fieldName) as di_row_id from ${dbName}.$tblName"
    if (StringUtils.isBlank(filter)) windowed else s"$windowed where $filter"
  }


  /** Builds the (optionally filtered) projection query used to read the table. */
  def generateTableQuery(tblName: String): String = {
    val base = s"select $selectedFields from ${dbName}.$tblName"
    if (filter != null && !filter.trim.equals("")) s"($base where $filter) SQLServerSourceTmp"
    else s"($base) SQLServerSourceTmp"
  }

  //21202F2938212B3E22272626252E434D
  /** Builds the min/max bound query for the advanced field-split read. */
  def generateFieldBoundQuery(tblName: String, splitField: String): String = {
    val base = s"select max($splitField) as max_v,min($splitField) as min_v from ${dbName}.${tblName}"
    if (filter != null && !filter.trim.equals("")) s"($base where $filter) SqlServerFieldBoundTmp"
    else s"($base) SqlServerFieldBoundTmp"
  }


  /**
   * Verify read permission for the configured database/table via the auth
   * extender; abort the job when the check fails.
   *
   * @param kc job context
   * @throws Exception when the permission check fails
   */
  def checkReadRight(kc: KhaosContext): Unit = {
    val checkResult: Boolean = MetaUtils.checkReadAuth(kc,
      _sqlserverConfig.db_name,
      _sqlserverConfig.table_name,
      _sqlserverConfig.extender.auth.clazz,
      compact(render(_sqlserverConfig.extender.auth.params)))
    if (!checkResult) {
      // BUG FIX: the message previously said "mysql reader" — a copy-paste
      // leftover from the MySQL source — which is misleading in job logs.
      val msg = "sqlserver reader init failed, 权限验证未通过!"
      log.error(msg)
      throw new Exception(msg)
    }
  }

  /** JDBC connection properties: credentials plus the configured fetch size. */
  def getJDBCProp: Properties = {
    // The driver property was previously commented out; Spark resolves the
    // driver from the jdbc:sqlserver url.
    val connectionProps = new Properties
    Seq("user" -> _userName, "password" -> _passWord, "fetchsize" -> _jdbc_fetch_size)
      .foreach { case (key, value) => connectionProps.put(key, value) }
    connectionProps
  }

  /**
   * Parse the module's JSON config into `SQLServerConfig` and cache the
   * frequently-used pieces as members. Identifiers are wrapped in double
   * quotes for SQLServer quoting.
   *
   * @param config module config as a JSON string
   */
  def initSQLServerConf(config: String): Unit = {
    implicit val formats: DefaultFormats.type = DefaultFormats

    _sqlserverConfig = parse(config, useBigDecimalForDouble = true).extract[SQLServerConfig]
    dbName = "\"" + _sqlserverConfig.db_name + "\""
    tblName = "\"" + _sqlserverConfig.table_name + "\""
    filter = _sqlserverConfig.filter
    advancedOptions = _sqlserverConfig.advanced_options
    extractFields = _sqlserverConfig.extract_fields
    // Fall back to a default SplitOption when no split mode is configured.
    _splite_mode = _sqlserverConfig.advanced_options.split_mode.getOrElse(SplitOption())

  }

  /**
   * Resolve table metadata and the SQLServer connection details (host, port,
   * credentials, instance name) from the metadata service.
   */
  def initSQLServerConnect(kc: KhaosContext): Unit = {
    _sqlserverMeta = MetaUtils.getSQLServerMeta(kc,
      _sqlserverConfig.db_name,
      _sqlserverConfig.table_name,
      _sqlserverConfig.extender.meta.clazz,
      compact(render(_sqlserverConfig.extender.meta.params)),
      this)

    val connect: SqlServerConnect = _sqlserverMeta.dsSqlServerConnect
    instanceName = connect.getInstanceName
    _ip = connect.getHost
    _port = connect.getPort
    _userName = connect.getUserName
    _passWord = connect.getPassWord
  }

  /**
   * Build the comma-separated SELECT column list. TIME columns are converted
   * to 24-hour hh:mm:ss strings via CONVERT(..., 8), every other column is
   * selected as-is (quoted).
   *
   * @return the column list, empty string when there are no extracted fields
   */
  def getSelectFields: String = {
    // Replaces the old java.util.StringBuffer + dropRight(1) construction
    // with an equivalent map/mkString.
    extractFields.map { ef =>
      val quoted = "\"" + ef.field + "\""
      if (ef.data_type.equalsIgnoreCase("TIME")) s"CONVERT(varchar(10),$quoted,8) as $quoted"
      else quoted
    }.mkString(",")
  }

  /**
   * Load `module.sqlserver.source.*` properties from the job configuration:
   * extra JDBC url parameters, fetch size and the per-slice sharding size (MB).
   */
  def loadProperties(kc: KhaosContext): Unit = {
    val sqlServerProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.sqlserver.source.").toMap
    log.info("sqlServerProperties Properties")
    sqlServerProperties.foreach { case (k, v) => log.info(k + "   " + v) }
    _jdbc_url_param = sqlServerProperties.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SOURCE_JDBC_URL_PARAM, "")
    _jdbc_fetch_size = sqlServerProperties.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SOURCE_JDBC_FETCHSIZE, "1000")
    // NOTE(review): .toInt throws on a malformed property value — confirm the
    // config layer validates this setting.
    _jdbc_sharding_size = sqlServerProperties.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SOURCE_JDBC_PAGING_SIZE, "200").toInt
  }

  /**
   * Validate the advanced split settings.
   *
   * @param splitField    quoted split column name, e.g. "\"id\""
   * @param splitNums     requested partition count as a string (1..1000)
   * @param extractFields source field metadata; the split column must match a
   *                      NUMBER-typed field
   * @return true when the field exists, is numeric, and the count is in [1, 1000]
   */
  def checkSplitParams(splitField: String, splitNums: String, extractFields: List[ExtractFieldInfo]): Boolean = {
    var result = false
    if (!StringUtils.isBlank(splitField) && !StringUtils.isBlank(splitNums)) {
      try {
        for (ef <- extractFields) {
          // The split column must match a NUMBER-typed extracted field.
          if (splitField.equals("\"" + ef.field + "\"") && ef.data_type.equals("NUMBER")) {
            if (1 <= splitNums.toInt && splitNums.toInt <= 1000) {
              logInfo(s"切分参数校验通过！切分数量为：$splitNums，切分字段为：$splitField")
              result = true
            } else {
              log.error(s"切分数量输入有误，目前只支持1到1000的整数！实际输入数量为：$splitNums")
            }
          }
        }
        if (!result) {
          log.error(s"切分字段输入有误，目前只支持整型字段！实际输入字段为：$splitField")
        }
      } catch {
        case e: Exception =>
          // BUG FIX: the caught exception was previously swallowed without any
          // detail; attach it so a bad number format is diagnosable from logs.
          log.error(s"切分字段或切分数量输入有误，切分字段目前只支持整型字段，切分数量只支持1到1000的整数！实际输入数量为：$splitNums，实际输入字段为：$splitField", e)
      }
    }
    result
  }

  /**
   * Cast every non-Long column of the min/max bound DataFrame to LongType so
   * the bounds can drive the partitioned JDBC read. (Source drivers return
   * different numeric types: MySQL IntegerType/LongType/DecimalType(20,0),
   * Oracle DecimalType(38,10).)
   *
   * @param df bound DataFrame with driver-specific numeric column types
   * @return the same DataFrame with all columns cast to LongType
   */
  def getLongBoundDF(df: DataFrame): DataFrame = {
    // The old condition `!LongType || DecimalType(38,10)` was redundant: a
    // DecimalType column is never LongType, so the first test already covers
    // it. Net behavior — cast anything that is not already Long — is kept.
    val colsToCast: Seq[String] = df.schema.collect {
      case field if !field.dataType.equals(LongType) => field.name
    }
    colsToCast.foldLeft(df)((acc, name) => acc.withColumn(name, col(name).cast(LongType)))
  }

  /**
   * Report the output schema: one KhaosStructField per configured extract field.
   *
   * @param config module config as a JSON string
   * @return fields in the order they appear in the config
   */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    // BUG FIX: the config was previously parsed as MySQLConfig (copy-paste from
    // the MySQL source); use SQLServerConfig like the rest of this class so
    // SQLServer-specific config fields are extracted consistently.
    val sqlServerInfo: SQLServerConfig = parse(config, useBigDecimalForDouble = true).extract[SQLServerConfig]
    sqlServerInfo.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
  }

}
