package com.kingsoft.dc.khaos.module.spark.source

import java.util.Properties
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.extender.meta.model.ds.OracleConnect
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.OracleConstants.MODULE_ORACLE_SOURCE_JDBC_OPTIONS
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, MetaDataConstants, OracleConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, OracleConfig, RdbmsAdvancedOption, ShardingSourceInfo, SplitOption}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.{DataframeUtils, MetaUtils, TableSplitUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.{Failure, Success, Try}

/**
 * Created by WANGYING15 on 2019/6/12.
 */
class OracleSource extends SourceStrategy with Logging {

  // Oracle table metadata resolved by MetaUtils (see initOracleConnect).
  private var oracleMeta: MetaDataEntity = _
  // Parsed module configuration (JSON config -> OracleConfig, see initOracleConf).
  private var oracleConfig: OracleConfig = _

  private val driver = "oracle.jdbc.driver.OracleDriver"
  // Connection endpoint ("ip:port") and credentials, filled by initOracleConnect.
  private var host = ""
  private var userName = ""
  private var passWord = ""

  // Quoted database/table names and the optional WHERE filter from the config.
  private var dbName = ""
  private var tblName = ""
  private var dbAndTbl = ""
  private var filter = ""
  // Sharding (分库分表) settings; name patterns use the 前缀#变量#后缀 form.
  private var shardingSwitch = false
  private var shardingSourceList: List[ShardingSourceInfo] = _
  private var shardingDb = ""
  private var shardingDbKey = ""
  private var shardingDbType = ""
  private var shardingDbValue = ""
  private var shardingTable = ""
  private var shardingTableKey = ""
  private var shardingTableType = ""
  private var shardingTableValue = ""
  // Validates the 前缀#变量#后缀 naming pattern used by sharding names.
  private val regexKey = ".*#.*#.*"
  // Advanced options (split mode) and the list of fields extracted from the table.
  private var advancedOptions: RdbmsAdvancedOption = _
  private var extractFields: List[ExtractFieldInfo] = _
  private var splitMode: SplitOption = _
  private var splitOptions: String = ""
  // Comma-joined projection list used by every generated SELECT.
  private var selected_Fields = ""

  // Per-sharding-source JDBC urls and properties (parallel arrays, same index).
  private val shardingJDBCUrlArr = new ArrayBuffer[String]()
  private val shardingJDBCPropArr = new ArrayBuffer[Properties]()
  // "db.table" cartesian product of the matched sharding dbs and tables.
  private var cartesianProdTable: ListBuffer[String] = _

  // When true, DECIMAL columns are read through to_char (see initOracleConf).
  private var _oraclesource_number2string = false

  // JDBC fetch size and estimate-split sharding size; overridable via config.
  private var _jdbc_fetch_size = "1000"
  private var _jdbc_sharding_size = 600

  /**
   * Entry point: reads the configured Oracle source as a DataFrame.
   * Dispatches on the metadata origin (data-management vs dbserver).
   *
   * @throws Exception when the configured meta origin is unknown
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {
    // `val` instead of `var`: the origin is never reassigned.
    val meta_origin: String = kc.conf.getString(SchedulerConstants.META_ORIGIN, MetaDataConstants.META_DATAMANAGEMENT)
    meta_origin match {
      case MetaDataConstants.META_DATAMANAGEMENT =>
        this.read(kc, module_id, config, dependence)
      case MetaDataConstants.META_DBSERVER =>
        // Delegate to the dbserver-backed reader implementation.
        val dbs: OracleSourceDbs = new OracleSourceDbs
        dbs.source(kc, module_id, config, dependence)
      case _ =>
        throw new Exception(s"oracleSource meta_origin 参数错误! ==> $meta_origin")
    }
  }

  /**
   * Reads the Oracle source via the data-management metadata origin: resolves
   * the JDBC url, then dispatches on the sharding switch and split mode.
   */
  def read(kc: KhaosContext,
           module_id: String,
           config: String,
           dependence: Dependency): DataFrame = {
    init(kc, config)
    val prop: Properties = getJDBCProp(kc)

    // Build the JDBC url from the resolved connection metadata.
    val url: String = getOracleConnectTypeURL(oracleMeta.dsOracleConnect)
    log.info(s"Oracle jdbcUrl is: $url")

    // Sharding on: read the matched db/table cartesian product.
    // Sharding off: honour the data-management time/enum table splits.
    if (shardingSwitch) {
      log.info("分库分表模式打开")
      initShardingDbAndTb(kc)
      splitOptions match {
        case OracleConstants.SPLIT_WITH_FIELDS =>
          log.info("切分设置打开, 按字段切分")
          getShardingSplitWithFieldDF(kc)
        case OracleConstants.SPLIT_WITH_ESTIMATE =>
          log.info("切分设置打开, 按估值切分")
          getShardingSplitWithEstimateDF(kc)
        case OracleConstants.SPLIT_WITH_CU =>
          log.info("切分设置关闭, 按照资源数切分")
          getShardingSplitWithCuDF(kc)
      }
    } else {
      log.info("分库分表模式关闭")
      val metaClazz: String = oracleConfig.extender.meta.clazz
      val metaParams: String = compact(render(oracleConfig.extender.meta.params))
      splitOptions match {
        case OracleConstants.SPLIT_WITH_FIELDS =>
          log.info("切分设置打开, 按字段切分")
          getSplitWithFieldDF(kc, metaClazz, metaParams, url, prop)
        case OracleConstants.SPLIT_WITH_ESTIMATE =>
          log.info("切分设置打开, 按估值切分")
          getSplitWithEstimateDF(kc, metaClazz, metaParams, url, prop)
        case OracleConstants.SPLIT_WITH_CU =>
          log.info("切分设置关闭, 按照资源数切分")
          getSplitWithCuDF(kc, metaClazz, metaParams, url, prop)
      }
    }
  }

  /** Initializes sharding state: one JDBC url + Properties per sharding source. */
  def initShardingDbAndTb(kc: KhaosContext): Unit = {
    shardingSourceList.foreach { src =>
      shardingJDBCUrlArr.append(initOracleConnect(kc, src.dsName, src.dsId, src.dataType))
      shardingJDBCPropArr.append(getJDBCProp(kc))
    }
  }

  /** Estimate-based split read: delegates to the common non-field-split path. */
  def getSplitWithEstimateDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    getAdvancedOffDF(kc, metaClazz, metaParamsJson, jdbcUrl, prop)
  }

  /** CU-count-based split read: delegates to the common non-field-split path. */
  def getSplitWithCuDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    getAdvancedOffDF(kc, metaClazz, metaParamsJson, jdbcUrl, prop)
  }

  /**
   * Common read path when field-based splitting is not in effect.
   * Without a table-split strategy a single paginated read is issued;
   * otherwise the physical table names are resolved and unioned.
   */
  def getAdvancedOffDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    val tableSplit: DmTableSplit = TableSplitUtils.getTableSplit(kc, dbName, tblName,
      metaClazz,
      metaParamsJson)
    if (tableSplit == null) {
      // No table split configured: read the single physical table.
      getPagingSqlDF(kc, jdbcUrl, prop, dbAndTbl)
    } else {
      // Resolve the physical table list according to the split strategy.
      val tblNameList: List[String] = tableSplit.getStrategyType match {
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM =>
          getShardingEnumTableList(kc, tableSplit, metaClazz, metaParamsJson)
        case TableSplitUtils.StrategyTypeEnum.DATETIME =>
          getShardingDatetimeTableList(kc, tableSplit, metaClazz, metaParamsJson)
      }
      readMultiTables(kc, jdbcUrl, prop, tblNameList, splitOptions)
    }
  }

  /** Sharding read with CU-count-based splitting: delegates to the paging path. */
  def getShardingSplitWithCuDF(kc: KhaosContext): DataFrame = {
    getShardingPagingDF(kc)
  }

  /** Sharding read with estimate-based splitting: delegates to the paging path. */
  def getShardingSplitWithEstimateDF(kc: KhaosContext): DataFrame = {
    getShardingPagingDF(kc)
  }

  /**
   * Sharding read with field-based splitting. Falls back to the paging read
   * when the split field/count fail validation.
   */
  def getShardingSplitWithFieldDF(kc: KhaosContext): DataFrame = {
    val mode: SplitOption = advancedOptions.split_mode.get
    val field: String = processName(mode.split_field)
    val nums: String = mode.split_nums
    // Validate the split parameters before attempting a partitioned read.
    if (checkSplitParams(field, nums, extractFields)) {
      log.info("高级功能切分模式开启成功!")
      getShardingPartitionDF(kc, field, nums)
    } else {
      log.warn("高级功能切分模式开启失败，估算/分页查询！")
      getShardingSplitWithCuDF(kc)
    }
  }

  /**
   * Paging read across all sharding sources: for each source, recompute the
   * db.table cartesian product and union the non-empty table reads.
   *
   * NOTE(review): `indexOf(tb)` returns the FIRST occurrence, so duplicate
   * table names in the product would repeat index 0 — presumably the product
   * is distinct; verify in getCartesianProd.
   * NOTE(review): each non-empty table is read twice (once for the `first()`
   * emptiness probe, once for the union) — likely a performance cost, left
   * as-is to preserve behavior.
   */
  def getShardingPagingDF(kc: KhaosContext): DataFrame = {
    var resultData: DataFrame = null
    for (i <- shardingSourceList.indices) {
      // Reset the db/table patterns and recompute the cartesian product,
      // since getCartesianProd mutates shardingDb/shardingTable.
      shardingDb = oracleConfig.shardingDb
      shardingTable = oracleConfig.shardingTable
      cartesianProdTable = getCartesianProd(kc, i)
      for (tb <- cartesianProdTable) {
        val index: Int = cartesianProdTable.indexOf(tb)
        if (index == 0 && i == 0) {
          resultData = getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb)
        } else {
          // Try(first()) is used as an emptiness probe: Failure => empty frame.
          val t = Try(resultData.first())
          t match {
            case Failure(exception) => resultData = getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb)
            case Success(value) =>
              val f = Try(getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb).first())
              f match {
                case Failure(exception) => resultData = resultData
                case Success(value) => resultData = resultData.union(getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb))
              }
          }
          //          if (resultData.isEmpty) {
          //            resultData = getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb)
          //          } else if (getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb).isEmpty) {
          //            resultData = resultData
          //          } else {
          //            resultData = resultData.union(getPagingSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb))
          //          }

        }
      }
    }
    resultData
  }

  /**
   * Field-split read path (advanced split mode on). Without a table-split
   * strategy a single field-partitioned read is issued; otherwise the
   * physical tables are resolved and unioned.
   */
  def getSplitWithFieldDF(kc: KhaosContext, metaClazz: String, metaParamsJson: String, jdbcUrl: String, prop: Properties): DataFrame = {
    val tableSplit: DmTableSplit = TableSplitUtils.getTableSplit(kc, dbName, tblName, metaClazz, metaParamsJson)
    if (tableSplit == null) {
      // Not a split table.
      getSplitFieldDF(kc, jdbcUrl, prop, dbAndTbl)
    } else {
      // Split table: the member tblName must NOT be used for physical reads.
      val tblNameList: List[String] = tableSplit.getStrategyType match {
        // Enum-based split.
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM =>
          getShardingEnumTableList(kc, tableSplit, metaClazz, metaParamsJson)
        // Date-based split: read the physical table for the current batch date.
        case TableSplitUtils.StrategyTypeEnum.DATETIME =>
          getShardingDatetimeTableList(kc, tableSplit, metaClazz, metaParamsJson)
      }
      readMultiTables(kc, jdbcUrl, prop, tblNameList, splitOptions)
    }
  }

  /**
   * Resolves the physical table names for an enum-based table split and
   * returns them quoted for case-sensitive Oracle naming.
   */
  def getShardingEnumTableList(kc: KhaosContext, tableSplit: DmTableSplit, metaClazz: String, metaParamsJson: String): List[String] = {
    // The strategy value is a comma-separated list of enum values.
    val splitValues: List[String] = tableSplit.getStrategyValue.split(",").toList
    val realTables: List[String] =
      TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz, metaParamsJson, this, tableSplit, "in", splitValues)
    log.info(s"Sharding Enum tables => ${realTables.mkString(",")}")
    realTables.map(processName)
  }

  /**
   * Resolves the physical table names for a datetime-based table split using
   * the batch biz-date truncated to the configured granularity.
   *
   * @throws Exception when no physical table exists for the batch date
   */
  def getShardingDatetimeTableList(kc: KhaosContext, tableSplit: DmTableSplit, metaClazz: String, metaParamsJson: String): List[String] = {
    val jobBizDate: String = kc.conf.getString(SchedulerConstants.BIZ_DATE)
    // Truncate the yyyyMMdd biz-date to year / month / day granularity.
    val splitTime: String = tableSplit.getStrategyValue match {
      case TableSplitUtils.StrategyValueEnum.year => jobBizDate.substring(0, 4)
      case TableSplitUtils.StrategyValueEnum.month => jobBizDate.substring(0, 6)
      case TableSplitUtils.StrategyValueEnum.day => jobBizDate
    }
    val splitValues: List[String] = List(splitTime)
    val realTables: List[String] =
      TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz, metaParamsJson, this, tableSplit, "=", splitValues)
    if (realTables.isEmpty) {
      throw new Exception(s"读取的分表不存在=>tableName:${oracleConfig.table_name},分表值:${splitValues.mkString(",")}")
    }
    log.info(s"Sharding Datetime tables => ${realTables.mkString(",")}")
    realTables.map(processName)
  }

  /**
   * Reads every physical table in tblNameList according to the split option
   * and unions the per-table DataFrames into one result.
   *
   * Bug fix: the third case was a duplicate `SPLIT_WITH_ESTIMATE` (unreachable),
   * so `SPLIT_WITH_CU` — which its log message actually describes — fell
   * through to a MatchError. It now matches `SPLIT_WITH_CU`.
   */
  def readMultiTables(kc: KhaosContext, jdbcUrl: String, prop: Properties, tblNameList: List[String], splitOptions: String): DataFrame = {
    var multiTablesDF: DataFrame = null
    // zipWithIndex instead of indexOf: indexOf is O(n) per element and returns
    // the first occurrence, which is wrong for duplicate table names.
    tblNameList.zipWithIndex.foreach { case (oneTableName, index) =>
      val oneTableDF: DataFrame = splitOptions match {
        case OracleConstants.SPLIT_WITH_FIELDS =>
          log.info("按字段切分 table:{}", oneTableName)
          getSplitFieldDF(kc, jdbcUrl, prop, oneTableName)
        case OracleConstants.SPLIT_WITH_ESTIMATE =>
          log.info("按估值切分 table:{}", oneTableName)
          getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
        case OracleConstants.SPLIT_WITH_CU =>
          log.info("按照资源数切分 table:{}", oneTableName)
          getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
      }
      multiTablesDF = if (index == 0) oneTableDF else multiTablesDF.union(oneTableDF)
    }
    multiTablesDF
  }


  /**
   * Field-partitioned read across all sharding sources: for each source,
   * recompute the db.table cartesian product and union the non-empty
   * partitioned reads.
   *
   * NOTE(review): mirrors getShardingPagingDF — same `indexOf` first-occurrence
   * caveat and same double-read (emptiness probe + union) pattern; kept
   * identical to preserve behavior.
   */
  def getShardingPartitionDF(kc: KhaosContext, splitField: String, splitNums: String): DataFrame = {
    var resultData: DataFrame = null
    for (i <- shardingSourceList.indices) {
      // Reset the db/table patterns and recompute the cartesian product,
      // since getCartesianProd mutates shardingDb/shardingTable.
      shardingDb = oracleConfig.shardingDb
      shardingTable = oracleConfig.shardingTable
      cartesianProdTable = getCartesianProd(kc, i)
      for (tb <- cartesianProdTable) {
        val index: Int = cartesianProdTable.indexOf(tb)
        if (index == 0 && i == 0) {
          resultData = getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt)
        } else {
          // Try(first()) is used as an emptiness probe: Failure => empty frame.
          val t = Try(resultData.first())
          t match {
            case Failure(exception) => resultData = getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt)
            case Success(value) =>
              val f = Try(getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt).first())
              f match {
                case Failure(exception) => resultData = resultData
                case Success(value) => resultData = resultData.union(getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt))
              }
          }

          //          if (resultData.isEmpty) {
          //            resultData = getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt)
          //          } else if (getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt).isEmpty) {
          //            resultData = resultData
          //          } else {
          //            resultData = resultData.union(getPartitionSqlDF(kc, shardingJDBCUrlArr(i), shardingJDBCPropArr(i), tb, splitField, splitNums.toInt))
          //          }

        }
      }
    }
    resultData
  }

  /**
   * Computes the "db.table" cartesian product for the sharding source at
   * `index` by matching database and table names against the configured
   * patterns (any-wildcard or regex), querying Oracle's ALL_USERS and
   * USER_TABLES dictionaries.
   *
   * Side effects: mutates the member vars shardingDb/shardingTable (callers
   * reset them before each invocation).
   *
   * NOTE(review): when both types are neither "any" nor "regex", the match
   * SQL strings stay empty and the jdbc reads below would fail — presumably
   * the config guarantees one of the four combinations.
   */
  def getCartesianProd(kc: KhaosContext, index: Int): ListBuffer[String] = {
    var fuzzyMatchDbSql = ""
    var fuzzyMatchTbSql = ""
    var cartesianProd = new ListBuffer[String]()
    var shardingDbArr = new ArrayBuffer[String]()
    var shardingTbArr = new ArrayBuffer[String]()

    // Enterprise cloud 2.4.1 pattern matching: "any" means arbitrary characters
    // (translated to SQL LIKE), "regex" means a regular expression (the
    // sharding*Value is used directly via REGEXP_LIKE).
    logInfo("shardingDb===>" + shardingDb + "        shardingTable===>" + shardingTable)
    logInfo("shardingDbValue===>" + shardingDbValue + "        shardingTableValue===>" + shardingTableValue)
    if (shardingDbType.equals("any") && shardingTableType.equals("any")) {
      shardingDbArr = getConcatShardingStr(shardingDb, shardingDbValue)
      shardingTbArr = getConcatShardingStr(shardingTable, shardingTableValue)
      shardingDb = analysisAnyCharacters(shardingDb)
      shardingTable = analysisAnyCharacters(shardingTable)
      fuzzyMatchTbSql = s"(SELECT TABLE_NAME from USER_TABLES where TABLE_NAME like '$shardingTable') fuzzyMatchTbSqlTmp"
      fuzzyMatchDbSql = s"(SELECT USERNAME from ALL_USERS where USERNAME like '$shardingDb') fuzzyMatchDbSqlTmp"
    } else if (shardingDbType.equals("any") && shardingTableType.equals("regex")) {
      shardingDbArr = getConcatShardingStr(shardingDb, shardingDbValue)
      shardingDb = analysisAnyCharacters(shardingDb)
      fuzzyMatchTbSql = s"(SELECT TABLE_NAME from USER_TABLES where REGEXP_LIKE(TABLE_NAME,'$shardingTable')) fuzzyMatchTbSqlTmp"
      fuzzyMatchDbSql = s"(SELECT USERNAME from ALL_USERS where USERNAME like '$shardingDb') fuzzyMatchDbSqlTmp"
    } else if (shardingDbType.equals("regex") && shardingTableType.equals("any")) {
      shardingTbArr = getConcatShardingStr(shardingTable, shardingTableValue)
      shardingTable = analysisAnyCharacters(shardingTable)
      fuzzyMatchTbSql = s"(SELECT TABLE_NAME from USER_TABLES where TABLE_NAME like '$shardingTable') fuzzyMatchTbSqlTmp"
      fuzzyMatchDbSql = s"(SELECT USERNAME from ALL_USERS where REGEXP_LIKE(USERNAME,'$shardingDb')) fuzzyMatchDbSqlTmp"
    } else if (shardingDbType.equals("regex") && shardingTableType.equals("regex")) {
      fuzzyMatchTbSql = s"(SELECT TABLE_NAME from USER_TABLES where REGEXP_LIKE(TABLE_NAME,'$shardingTable')) fuzzyMatchTbSqlTmp"
      fuzzyMatchDbSql = s"(SELECT USERNAME from ALL_USERS where REGEXP_LIKE(USERNAME,'$shardingDb')) fuzzyMatchDbSqlTmp"
    }

    logInfo("shardingDbArr===>" + shardingDbArr + "        shardingTbArr===>" + shardingTbArr)
    logInfo("shardingDb===>" + shardingDb + "        shardingTable===>" + shardingTable)
    logInfo("fuzzyMatchTbSql===>" + fuzzyMatchTbSql + "       fuzzyMatchDbSql===>" + fuzzyMatchDbSql)
    logInfo(s"shardingJDBCUrlArr($index)===>" + shardingJDBCUrlArr(index) + s"      shardingJDBCPropArr($index)===>" + shardingJDBCPropArr(index))

    // Query the data dictionary for the actually existing dbs/tables.
    val tbRows: Array[Row] = kc.sparkSession.read.jdbc(shardingJDBCUrlArr(index), fuzzyMatchTbSql, shardingJDBCPropArr(index)).collect()
    val dbRows: Array[Row] = kc.sparkSession.read.jdbc(shardingJDBCUrlArr(index), fuzzyMatchDbSql, shardingJDBCPropArr(index)).collect()
    val dbs: Array[String] = dbRows.map((row: Row) => {
      val matchDbl: String = row.getAs[String](0)
      val db: String = processName(matchDbl)
      db
    })
    val tbs: Array[String] = tbRows.map((row: Row) => {
      val matchTbl: String = row.getAs[String](0)
      val table: String = processName(matchTbl)
      table
    })
    for (db <- dbs) {
      for (table <- tbs) {
        cartesianProd.append(s"$db.$table")
      }
    }
    log.info("===>cartesianProdbefore")
    cartesianProd.foreach(log.info)


    // The db and tb arrays below are only used to validate which (db, table)
    // pairs really exist; they must be deduplicated because this way of
    // collecting them can contain repeated db/table names.
    if (shardingDbArr.size == 0) {
      dbRows.map((row: Row) => {
        shardingDbArr.append(processName(row.getAs[String](0)))
      })
    }
    if (shardingTbArr.size == 0) {
      tbRows.map((row: Row) => {
        shardingTbArr.append(processName(row.getAs[String](0)))
      })
    }
    logInfo("shardingDbArr===>" + shardingDbArr + "        shardingTbArr===>" + shardingTbArr)

    // Keep only the pairs that actually exist in the dictionary-based product.
    val tmpProd = new ListBuffer[String]()
    for (db <- shardingDbArr.distinct) {
      for (table <- shardingTbArr.distinct) {
        log.info("笛卡尔积临时结果:" + s"$db.$table")
        if (cartesianProd.contains(s"$db.$table"))
          tmpProd.append(s"$db.$table")
      }
    }
    cartesianProd = tmpProd


    log.info("===>cartesianProd")
    cartesianProd.foreach(log.info)
    cartesianProd
  }


  /**
   * Expands a 前缀#变量#后缀 sharding pattern: substitutes every comma-separated
   * match value for the variable segment and quotes each resulting name.
   */
  def getConcatShardingStr(shardingKey: String, shardingValue: String): ArrayBuffer[String] = {
    val parts: Array[String] = shardingKey.split("#", -1)
    val values: Array[String] = shardingValue.split(",", -1)
    val result = new ArrayBuffer[String]()
    values.foreach { value =>
      // parts(0)=prefix, parts(2)=suffix; the middle segment is replaced.
      result.append(processName(parts(0).concat(value).concat(parts(2))))
    }
    result
  }

  /**
   * "any" wildcard mode: converts a 前缀#变量#后缀 pattern into a SQL LIKE
   * pattern by replacing the variable segment with '%'.
   *
   * @throws Exception when the input does not follow the 前缀#变量#后缀 form
   */
  def analysisAnyCharacters(shardingStr: String): String = {
    // Guard clause: reject names that do not follow the pattern.
    if (!shardingStr.matches(regexKey)) {
      log.error(s"分库分表名未按 前缀#变量#后缀 规则填写，无法匹配相应库表名")
      throw new Exception(s"分库分表名未按 前缀#变量#后缀 规则填写，无法匹配相应库表名")
    }
    val parts: Array[String] = shardingStr.split("#", -1)
    parts(0).concat("%").concat(parts(2))
  }

  /**
   * Reads one physical table with field-based partitioning when the split
   * parameters validate; otherwise falls back to the paging read.
   */
  def getSplitFieldDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, oneTableName: String): DataFrame = {
    val field: String = processName(splitMode.split_field)
    val nums: String = splitMode.split_nums
    // Validate the split parameters before attempting a partitioned read.
    if (checkSplitParams(field, nums, extractFields)) {
      log.info("高级功能切分模式开启成功!")
      getPartitionSqlDF(kc, jdbcUrl, prop, oneTableName, field, nums.toInt)
    } else {
      log.warn("高级功能切分模式开启失败，转为分页模式执行！")
      getPagingSqlDF(kc, jdbcUrl, prop, oneTableName)
    }
  }


  /**
   * Reads one table through Spark's partitioned JDBC API, using the min/max of
   * splitField as the partition bounds and splitNums partitions.
   *
   * NOTE(review): if the (filtered) table has no rows, max/min come back NULL
   * and the asInstanceOf[Long] below throws an NPE — confirm callers only
   * reach this path for non-empty tables.
   */
  def getPartitionSqlDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tableName: String, splitField: String, splitNums: Int): DataFrame = {
    val rangeSql: String = generateFieldBoundQuery(tableName, splitField)
    log.info(s"rangeSql => $rangeSql")
    val rangeDF: DataFrame = kc.sparkSession.read.jdbc(jdbcUrl, rangeSql, prop)
    val rangeBoundDF: Array[Row] = getLongBoundDF(rangeDF).collect

    // Unify the Any-typed bound values to Long for the partitioned read.
    val maxNum: Long = rangeBoundDF(0)(0).asInstanceOf[Long]
    val minNum: Long = rangeBoundDF(0)(1).asInstanceOf[Long]
    val oneExecuteTableDataSql: String = generateTableQuery(tableName)
    kc.sparkSession.read.jdbc(jdbcUrl, oneExecuteTableDataSql, splitField, minNum, maxNum, splitNums.toInt, prop)
  }

  /**
   * Paging read of one table: counts the rows, samples the table to estimate
   * row size, generates rownum-range paging SQLs, then unions the page reads.
   *
   * limit 0,1000
   * limit 1000,1000 ...
   *
   * @return the union of all page DataFrames (null when no SQL was generated)
   */
  def getPagingSqlDF(kc: KhaosContext, jdbcUrl: String, prop: Properties, tableName: String): DataFrame = {
    var df: DataFrame = null
    val countSql: String = getCountTableSql(tableName)
    val countDF: Array[Row] = kc.sparkSession.read.jdbc(jdbcUrl, countSql, prop).collect()
    val count: Long = countDF(0)(0).asInstanceOf[Number].longValue
    val nonExecuteTableDataSql: String = getTableDataSql(tableName)
    val sampleDF: DataFrame = getSampleDF(kc, jdbcUrl, tableName, prop, count)
    val sqls: ArrayBuffer[String] = getPagingSqls(kc, nonExecuteTableDataSql, count, sampleDF)
    log.info(s"countSql: $countSql => $count")
    // Publish the row count for downstream modules.
    kc.conf.set(CommonConstants.DATA_COUNT_SET_NUMS, String.valueOf(count))

    for (sql <- sqls) {
      val index: Int = sqls.indexOf(sql)
      if (index == 0) {
        df = kc.sparkSession.read.jdbc(jdbcUrl, sql, prop)
      } else {
        df = df.union(kc.sparkSession.read.jdbc(jdbcUrl, sql, prop))
      }
    }
    df
  }


  /** Builds the Oracle row-count query for tblName, honouring the optional filter. */
  def getCountTableSql(tblName: String): String = {
    val countSql: String =
      if (StringUtils.isBlank(filter)) s"(select count(1) from $tblName) oracleCount"
      else s"(select count(1) from $tblName where $filter) oracleCount"
    log.info(s"countSql => $countSql")
    countSql
  }

  /**
   * Samples the table for row-size estimation (intended 10 draws).
   *
   * NOTE(review): Oracle's ROWNUM is assigned during fetch, so `rownum = n`
   * with n > 1 never matches any row, and Random.nextInt(1) is always 0 —
   * most samples are therefore probably empty. Verify the estimation still
   * behaves as intended before changing this.
   */
  def getSampleDF(kc: KhaosContext, jdbcUrl: String, tableName: String, prop: Properties, count: Long): DataFrame = {
    var df: DataFrame = null
    var sampleSql = ""
    var randNum = 0
    for (i <- 0 to 9) {
      if (count.toInt < 1) {
        randNum = scala.util.Random.nextInt(1)
      } else {
        randNum = scala.util.Random.nextInt(count.toInt)
      }
      sampleSql = s"(select * from $tableName WHERE rownum = $randNum) oracleSample"
      log.info(s"sampleSql => $sampleSql")
      if (i == 0) {
        df = kc.sparkSession.read.jdbc(jdbcUrl, sampleSql, prop)
      } else {
        df = df.union(kc.sparkSession.read.jdbc(jdbcUrl, sampleSql, prop))
      }
    }
    df
  }


  /**
   * Builds the rownum-range paging SQLs: the row count is divided into
   * `segmentations` ranges (estimated from the sample for SPLIT_WITH_ESTIMATE,
   * taken from max executors for SPLIT_WITH_CU, otherwise 1).
   *
   * NOTE(review): the two branches of the filter check build the same SQL
   * except for incidental whitespace — candidates for merging.
   */
  def getPagingSqls(kc: KhaosContext, baseSql: String, count: Long, sampleDF: DataFrame): ArrayBuffer[String] = {
    var sqls = new ArrayBuffer[String]
    var segmentations = 1
    splitOptions match {
      case OracleConstants.SPLIT_WITH_ESTIMATE =>
        segmentations = DataframeUtils.estimateTaskSegmentation(count, sampleDF, _jdbc_sharding_size)
      case OracleConstants.SPLIT_WITH_CU =>
        // Enterprise cloud 2.0.8 (2021-09-24): fix oracle2hive sync count efficiency.
        segmentations = kc.sparkSession.conf.get("spark.dynamicAllocation.maxExecutors", "2").toInt
    }
    logInfo("segmentations: " + segmentations)
    val partitionNum: Int = Math.ceil(count / segmentations.toDouble).toInt
    logInfo("partitionNum: " + partitionNum)
    for (step <- 0 until segmentations) {
      val begin: Int = step * partitionNum
      var sql = ""
      if (StringUtils.isBlank(filter) || filter.trim.equals("")) {
        sql = s"(select $selected_Fields from ( $baseSql)s where  r <=($begin+$partitionNum) and r>$begin) tmp"
        // The last segment is bounded by the exact count to avoid overshoot.
        if (step == segmentations - 1) {
          sql = s"(select $selected_Fields from ($baseSql)s where r <=$count and r>$begin) tmp"
        }
      } else {
        sql = s"(select $selected_Fields from ($baseSql)s where r <=($begin+$partitionNum) and r>$begin) tmp"
        if (step == segmentations - 1) {
          sql = s"(select $selected_Fields from ($baseSql)s where r <=$count and r>$begin) tmp"
        }
      }
      sqls += sql
      log.info(s"Paging Sql: $step => $sql")
    }
    sqls
  }

  /** Builds the inner SELECT that projects the fields plus rownum (as r) for paging. */
  def getTableDataSql(tblName: String): String =
    if (!StringUtils.isBlank(filter) && !filter.trim.equals(""))
      s"select $selected_Fields,rownum r from $tblName where ($filter)"
    else
      s"select $selected_Fields,rownum r from $tblName"

  /**
   * Builds the wrapped SELECT used to read the whole table.
   * Consistency: use StringUtils.isNotBlank like getCountTableSql instead of
   * the hand-rolled null/trim check (behaviour is equivalent).
   */
  def generateTableQuery(table: String): String = {
    if (StringUtils.isNotBlank(filter))
      s"(select $selected_Fields from $table where $filter) OracleSourceTmp"
    else
      s"(select $selected_Fields from $table) OracleSourceTmp"
  }

  /**
   * Builds the min/max bound query for the advanced field-split feature.
   * Consistency: use StringUtils.isNotBlank like getCountTableSql instead of
   * the hand-rolled null/trim check (behaviour is equivalent).
   */
  def generateFieldBoundQuery(tblName: String, splitField: String): String = {
    if (StringUtils.isNotBlank(filter))
      s"(select max($splitField) as max,min($splitField) as min from $tblName where $filter) OracleFieldBoundTmp"
    else
      s"(select max($splitField) as max,min($splitField) as min from $tblName) OracleFieldBoundTmp"
  }

  /** Initializes the reader; the call order below matters (config before auth/meta). */
  def init(kc: KhaosContext, config: String): Unit = {
    // Parse the JSON module config into members.
    initOracleConf(kc, config)
    // Load module-level properties (fetch size, sharding size).
    loadProperties(kc)
    // Verify read permission on the configured table.
    checkReadRight(kc)
    // Resolve connection metadata (host, credentials).
    initOracleConnect(kc)
  }

  /** Wraps an identifier in double quotes for case-sensitive Oracle naming. */
  def processName(str: String): String =
    "\"" + str + "\""

  /**
   * Parses the JSON module config into the member state and builds the
   * projection list used by all generated SELECTs.
   *
   * NOTE(review): the config key "khao_oraclesource_decimal2string" looks like
   * a typo of "khaos_..." — confirm against the deployment configs before
   * renaming (changing it would break existing jobs).
   */
  def initOracleConf(kc: KhaosContext, config: String): Unit = {
    implicit val formats: DefaultFormats.type = DefaultFormats

    oracleConfig = parse(config, useBigDecimalForDouble = true).extract[OracleConfig]
    dbName = processName(oracleConfig.db_name)
    tblName = processName(oracleConfig.table_name)
    dbAndTbl = s"$dbName.$tblName"
    filter = oracleConfig.filter
    shardingSwitch = oracleConfig.shardingSwitch
    shardingSourceList = oracleConfig.shardingSourceList
    shardingDb = oracleConfig.shardingDb
    shardingDbKey = oracleConfig.shardingDbKey
    shardingDbValue = oracleConfig.shardingDbValue
    shardingDbType = oracleConfig.shardingDbType
    shardingTable = oracleConfig.shardingTable
    shardingTableKey = oracleConfig.shardingTableKey
    shardingTableValue = oracleConfig.shardingTableValue
    shardingTableType = oracleConfig.shardingTableType
    advancedOptions = oracleConfig.advanced_options
    extractFields = oracleConfig.extract_fields
    splitMode = oracleConfig.advanced_options.split_mode.getOrElse(SplitOption())
    // Split options only apply when the split mode is switched on.
    if (splitMode.on_off) {
      splitOptions = splitMode.split_options.getOrElse(OracleConstants.SPLIT_WITH_CU)
    } else {
      splitOptions = OracleConstants.SPLIT_WITH_CU
    }

    // Build the projection list for the generated SQL statements.
    val selectFields = new StringBuffer()
    if (kc.conf.getString("khao_oraclesource_decimal2string", "false").equalsIgnoreCase("true")) {
      _oraclesource_number2string = true
    }
    for (ef <- extractFields) {
      // DECIMAL columns are optionally read via to_char to avoid precision loss.
      if (ef.data_type.equalsIgnoreCase("DECIMAL") && _oraclesource_number2string) {
        selectFields.append(s"to_char(${processName(ef.field)}) as ${processName(ef.field)},")
      } else {
        selectFields.append(s"${processName(ef.field)},")
      }

    }
    // Drop the trailing comma.
    selected_Fields = selectFields.toString.dropRight(1)
    logInfo("selectfields==>:" + selected_Fields)
  }

  /**
   * Verifies read permission on the configured database/table.
   *
   * @throws Exception when the permission check fails
   */
  def checkReadRight(kc: KhaosContext): Unit = {
    val allowed: Boolean = MetaUtils.checkReadAuth(kc,
      oracleConfig.db_name,
      oracleConfig.table_name,
      oracleConfig.extender.auth.clazz,
      compact(render(oracleConfig.extender.auth.params)))
    if (!allowed) {
      log.error(s"oracle reader init failed, 权限验证未通过!")
      throw new Exception(s"oracle reader init failed, 权限验证未通过!")
    }
  }

  /** Resolves connection metadata for the configured table and fills host/credentials. */
  def initOracleConnect(kc: KhaosContext): Unit = {
    oracleMeta = MetaUtils.getOracleMeta(kc,
      oracleConfig.db_name,
      oracleConfig.table_name,
      oracleConfig.extender.meta.clazz,
      compact(render(oracleConfig.extender.meta.params)),
      this)
    val connect: OracleConnect = oracleMeta.dsOracleConnect
    host = connect.getHost
    // The user name is quoted because it doubles as the Oracle schema name.
    userName = processName(connect.getUsername)
    passWord = connect.getPassword
  }

  /**
   * Resolves connection metadata for one sharding source, fills host/credentials,
   * and returns the source's JDBC url.
   */
  def initOracleConnect(kc: KhaosContext, dsName: String, dsId: String, dataType: String): String = {
    oracleMeta = MetaUtils.getOracleShardingMeta(kc,
      dsName,
      dsId,
      dataType,
      oracleConfig.extender.meta.clazz,
      this)
    val connect: OracleConnect = oracleMeta.dsOracleConnect
    host = connect.getHost
    // The user name is quoted because it doubles as the Oracle schema name.
    userName = processName(connect.getUsername)
    passWord = connect.getPassword
    getOracleConnectTypeURL(connect)
  }


  /** Builds the JDBC connection Properties shared by every read in this module. */
  def getJDBCProp(kc: KhaosContext): Properties = {
    val prop = new Properties
    prop.put("driver", driver)
    prop.put("user", userName)
    prop.put("password", passWord)
    prop.put("fetchsize", _jdbc_fetch_size)

    // ojdbc6 rejects OS user names longer than 30 characters; truncate them.
    val osuser: String = System.getProperty("user.name")
    if (osuser.length > 30) {
      prop.put("oracle.jdbc.v$session.osuser", osuser.substring(0, 30))
    }
    logInfo("oracle.jdbc.v$session.osuser:" + prop.getProperty("oracle.jdbc.v$session.osuser"))
    logInfo("osuser:____________" + osuser)

    // Apply user-defined JDBC overrides from the module configuration.
    kc.conf.getAllWithUnPrefix(MODULE_ORACLE_SOURCE_JDBC_OPTIONS).foreach { case (k, v) =>
      prop.put(k, v)
    }

    log.info(s"jdbcProperties ${prop.toString}")
    prop
  }


  /** Loads module.oracle.source.* properties (sharding size and JDBC fetch size). */
  def loadProperties(kc: KhaosContext): Unit = {
    val props: Map[String, String] = kc.conf.getAllWithPrefix("module.oracle.source.").toMap
    log.info("Oracle Properties")
    props.foreach { case (k, v) => log.info(k + "   " + v) }
    _jdbc_sharding_size = props.getOrElse(OracleConstants.MODULE_ORACLE_SOURCE_SHARDING_SIZE, "600").toInt
    _jdbc_fetch_size = props.getOrElse(OracleConstants.MODULE_ORACLE_SOURCE_JDBC_FETCHSIZE, "1000")
  }


  /**
   * Builds the Oracle JDBC url for the connect type (SID / ServiceName / RAC).
   * The host field carries "ip:port" (comma-separated pairs for RAC).
   *
   * Robustness fix: an unknown connect type used to return null, deferring the
   * failure to an opaque NPE/driver error at read time; it now fails fast.
   *
   * @throws Exception when the connect type is not supported
   */
  def getOracleConnectTypeURL(connect: OracleConnect): String = {
    val host: String = connect.getHost
    val connectType: String = connect.getConnectType
    val instanceName: String = connect.getInstanceName
    connectType match {
      case "SID" =>
        s"jdbc:oracle:thin:@$host:$instanceName"
      case "ServiceName" =>
        s"jdbc:oracle:thin:@//$host/$instanceName"
      case "RAC" =>
        // One (ADDRESS=...) entry per host:port pair, e.g.
        // (ADDRESS=(PROTOCOL=TCP)(HOST=x.x.x.x)(PORT=1521))
        val address: String = host.split(",").map { hostPort =>
          val parts: Array[String] = hostPort.split(":")
          s"(ADDRESS=(PROTOCOL=TCP)(HOST=${parts(0)})(PORT=${parts(1)}))"
        }.mkString
        // jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=...)(LOAD_BALANCE=yes)(FAILOVER=ON)(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=db.domain)))
        s"jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=${address})(LOAD_BALANCE=yes)(FAILOVER=ON)(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=$instanceName)))"
      case _ =>
        throw new Exception(s"不支持的Oracle连接方式: $connectType")
    }
  }

  /**
   * 检验切分设置功能的参数是否合规
   *
   * Validates the advanced split settings: the split field must be one of the
   * extracted fields with type NUMBER, and the split count must be an integer
   * in [1, 1000].
   *
   * @param splitField    : 切分字段 (already quoted via processName)
   * @param splitNums     : 切分数量
   * @param extractFields : 源表字段信息
   * @return true only when both field and count validate
   */
  def checkSplitParams(splitField: String, splitNums: String, extractFields: List[ExtractFieldInfo]): Boolean = {
    var result = false
    if ((splitField != null && !splitField.trim.equals("")) && (splitNums != null && !splitNums.trim.equals(""))) {
      try {
        for (ef <- extractFields) {
          if (splitField.equals(processName(ef.field)) && ef.data_type.equals("NUMBER")) {
            if (1 <= splitNums.toInt && splitNums.toInt <= 1000) {
              logInfo(s"切分参数校验通过！切分数量为：$splitNums，切分字段为：$splitField")
              result = true
            } else {
              log.error(s"切分数量输入有误，目前只支持1到1000的整数！实际输入数量为：$splitNums")
            }
          }
        }
        if (!result) {
          log.error(s"切分字段输入有误，目前只支持整型字段！实际输入字段为：$splitField")
        }
      } catch {
        // NOTE(review): the exception (e.g. NumberFormatException from toInt) is
        // swallowed here; consider logging `e` for diagnosability.
        case e: Exception => {
          log.error(s"切分字段或切分数量输入有误，切分字段目前只支持整型字段，切分数量只支持1到1000的整数！实际输入数量为：$splitNums，实际输入字段为：$splitField")
        }
      }
    }
    result
  }

  /**
   * 将最值df数据类型统一转为Long型，以便后续用上下边界调用多分区并行读取api
   * mysql中最值df的类型有IntegerType、LongType和DecimalType(20,0)，oracle中最值df的类型为DecimalType(38, 10)
   *
   * Fix: the original condition `!LongType || DecimalType(38,10)` was
   * redundant/confusing — DecimalType never equals LongType, so the second
   * clause was always subsumed. Simplified to "cast everything that is not
   * already Long" (identical behaviour).
   *
   * @param df : 不同数据类型的字段最值df
   * @return : 统一转为Long型的字段最值df
   */
  def getLongBoundDF(df: DataFrame): DataFrame = {
    // Columns that are not already LongType and therefore need a cast.
    val toCast: Seq[String] = df.schema.collect {
      case s: StructField if !s.dataType.equals(LongType) => s.name
    }
    // Fold the casts over the frame instead of mutating a var.
    toCast.foldLeft(df)((acc, name) => acc.withColumn(name, col(name).cast(LongType)))
  }

  /** Derives the module's output schema from the configured extract fields. */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    val oracleInfo: OracleConfig = parse(config, useBigDecimalForDouble = true).extract[OracleConfig]
    // One KhaosStructField per extracted source field, in config order.
    oracleInfo.extract_fields.map((ef: ExtractFieldInfo) => KhaosStructField(ef.field, ef.data_type))
  }
}