package com.kingsoft.dc.khaos.module.spark.source

import java.util.Properties

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{CommonConstants, OracleConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, OracleConfig, SplitOption}
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.{DataframeUtils, MetaUtils, TableSplitUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.ArrayBuffer

/**
  * Created by WANGYING15 on 2019/6/12.
  */
class OracleSourceDbs extends SourceStrategy with Logging {

  // Resolved Oracle datasource metadata (connection info); set by init().
  private var oracleMeta: MetaDataEntity = null
  // Parsed module configuration; set by init().
  private var oracleConfig: OracleConfig = null
  // Target rows-per-shard used when estimating the number of paging segments.
  private var _jdbc_sharding_size = 600
  // Comma-separated projection list built from extract_fields in source().
  private var selectedFields = ""
  // When true, use the configured db_name as schema instead of the login user.
  private var _dbname_type_switch = false
  // When true, DECIMAL columns are read through to_char() as strings.
  private var _oraclesource_number2string = false
  // Advanced split options (defaults to SplitOption() when not configured).
  private var _splite_mode: SplitOption = null
  // JDBC fetch size handed to the driver as a string property.
  private var _jdbc_fetch_size = "1000"

  /**
    * Entry point: read the configured Oracle table into a DataFrame.
    *
    * Depending on the advanced split settings the table is read either as
    * multiple partitions bounded by a numeric split field
    * ("split_with_fields"), or as a union of ROWNUM-paged queries.
    *
    * @param kc         khaos context holding the SparkSession and configuration
    * @param module_id  id of this module instance (unused here)
    * @param config     JSON module configuration, parsed into OracleConfig
    * @param dependence upstream dependency descriptor (unused here)
    * @return DataFrame over the selected columns of the source table
    */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {

    // init() also loads the module.oracle.source.* properties; the previous
    // redundant second loadProperties(kc) call was removed.
    init(kc, config)
    val connect = oracleMeta.dsOracleConnect
    val host = connect.getHost
    val connectType = connect.getConnectType
    val userName = connect.getUsername
    val passWord = connect.getPassword
    val instanceName = connect.getInstanceName

    // Enterprise-cloud (China Life) customization: when the switch is on, the
    // schema comes from the configured db_name instead of the login user name.
    val dbName = if (_dbname_type_switch) oracleConfig.db_name else userName

    var tblName = oracleConfig.table_name
    val filter = oracleConfig.filter
    val advancedOptions = oracleConfig.advanced_options
    val extractFields = oracleConfig.extract_fields
    _splite_mode = advancedOptions.split_mode.getOrElse(SplitOption())

    // Build the projection list for the SQL statements. DECIMAL columns are
    // optionally converted to strings via to_char() (decimal2string switch)
    // to avoid precision loss on wide Oracle NUMBERs.
    if (kc.conf.getString("khao_oraclesource_decimal2string", "false").equalsIgnoreCase("true")) {
      _oraclesource_number2string = true
    }
    selectedFields = extractFields.map { ef =>
      if (ef.data_type.equalsIgnoreCase("DECIMAL") && _oraclesource_number2string) {
        s"to_char(${ef.field}) as ${ef.field}"
      } else {
        ef.field
      }
    }.mkString(",")
    logInfo("selectfields==>:" + selectedFields)

    // Assemble the wrapped sub-query used as the jdbc() "table" argument.
    val tableQuery: String = generateTableQuery(dbName, tblName, selectedFields, filter)
    log.info(s"OracleSourceTmp sql is: $tableQuery")

    val url = getOracleConnectTypeURL(host, instanceName, connectType)
    log.info(s"Oracle jdbcUrl is: $url")
    val prop = new Properties
    prop.put("driver", "oracle.jdbc.driver.OracleDriver")
    prop.put("user", userName)
    prop.put("password", passWord)
    prop.put("fetchsize", _jdbc_fetch_size)
    logInfo("fetchsize_jdbc_fetch_size=======》" + _jdbc_fetch_size)
    // ojdbc6 rejects OS user names longer than 30 characters; truncate to keep
    // the v$session.osuser attribute within the driver's limit.
    val osuser = System.getProperty("user.name")
    if (osuser != null && osuser.length > 30)
      prop.put("oracle.jdbc.v$session.osuser", osuser.substring(0, 30))

    // split_mode off  -> single logical read via ROWNUM paging;
    // split_mode on with "split_with_fields" -> parallel read over split_nums
    // partitions bounded by the split field's min/max.
    // NOTE: .exists replaces the former .get, which threw when split_options
    // was absent although split_mode itself was configured.
    val fieldSplitEnabled = advancedOptions.split_mode.nonEmpty &&
      _splite_mode.on_off.equals(true) &&
      _splite_mode.split_options.exists(_.equalsIgnoreCase("split_with_fields"))

    if (fieldSplitEnabled) {
      log.info("高级功能切分设置打开，按字段切分模式正在开启...")
      getRealTable4SplitModeOn(kc, dbName, tblName, oracleConfig.extender.meta.clazz,
        compact(render(oracleConfig.extender.meta.params)), url, tableQuery, prop, selectedFields, filter, _splite_mode, extractFields)
    } else {
      tblName = s"$dbName.$tblName"
      getPagingSqlDF(kc, url, filter, prop, tblName)
    }
  }

  /**
    * Read the table with the advanced "split_with_fields" mode: the min/max of
    * the numeric split field are queried first and then used as the bounds of a
    * partitioned jdbc() read with split_nums partitions. Falls back to a plain
    * single-partition read when the split parameters fail validation.
    */
  def getRealTable4SplitModeOn(kc: KhaosContext, dbName: String, tblName: String, metaClazz: String, metaParamsJson: String, url: String,
                               tableQuery: String, prop: Properties, selectedFields: String, filter: String, splitMode: SplitOption, extractFields: List[ExtractFieldInfo]): DataFrame = {
    val splitField = splitMode.split_field
    val splitNums = splitMode.split_nums
    // Validate the split parameters before attempting the partitioned read.
    if (checkSplitParams(splitField, splitNums, extractFields)) {
      log.info("高级功能切分模式开启成功!")
      val fieldBound = generateFieldBoundQuery(dbName, tblName, splitField, filter)
      val fieldBoundDF = kc.sparkSession.read.jdbc(url, fieldBound, prop)
      // Normalize the bound columns (Oracle returns DECIMAL(38,10)) to Long so
      // they can serve as lowerBound/upperBound of the partitioned read.
      val longBoundDF = getLongBoundDF(fieldBoundDF)
      val fieldBoundArr = longBoundDF.collect
      val maxNum = fieldBoundArr(0)(0).asInstanceOf[Long]
      val minNum = fieldBoundArr(0)(1).asInstanceOf[Long]
      kc.sparkSession.read.jdbc(url, tableQuery, splitField, minNum, maxNum, splitNums.toInt, prop)
    } else {
      log.warn("高级功能切分模式开启失败，转为普通模式执行！")
      kc.sparkSession.read.jdbc(url, tableQuery, prop)
    }
  }

  /**
    * Read the table as the union of ROWNUM-paged queries, e.g. rows
    * (0,1000], (1000,2000], ...
    *
    * Also publishes the total row count under DATA_COUNT_SET_NUMS.
    *
    * @param kc           khaos context
    * @param url          jdbc url
    * @param filter       optional where-clause predicate (blank when absent)
    * @param prop         jdbc connection properties
    * @param oneTableName fully qualified db.table name
    * @return union of all page DataFrames (null only if no page was generated,
    *         matching the original behavior)
    */
  def getPagingSqlDF(kc: KhaosContext, url: String, filter: String, prop: Properties, oneTableName: String): DataFrame = {
    val countSql = getCountTableSql(oneTableName, filter)
    val countDF = kc.sparkSession.read.jdbc(url, countSql, prop).collect()
    logInfo("count:_________" + countDF(0)(0))
    val count = countDF(0)(0).asInstanceOf[Number].longValue
    val nonExecuteTableDataSql = getTableDataSql(oneTableName, filter)
    val sampleDF = getSampleDF(kc, prop, count, url)
    val sqls = getPagingSqls(kc, nonExecuteTableDataSql, count, filter, sampleDF)
    log.info(s"countSql: $countSql => $count")
    kc.conf.set(CommonConstants.DATA_COUNT_SET_NUMS, String.valueOf(count))

    // map+reduce replaces the former indexOf-based loop, which was O(n²) and
    // would have reset the accumulated union had two page SQLs ever been equal.
    sqls.map(sql => kc.sparkSession.read.jdbc(url, sql, prop))
      .reduceOption(_ union _)
      .orNull
  }

  /** Build the wrapped count(1) query (optionally filtered) used to size paging. */
  def getCountTableSql(tblName: String, filter: String): String = {
    val countSql =
      if (StringUtils.isBlank(filter)) s"(select count(1) from $tblName) oracleCount"
      else s"(select count(1) from $tblName where $filter) oracleCount"
    log.info(s"countSql => $countSql")
    countSql
  }

  /**
    * Draw a small sample (union of ten 10-row SAMPLE BLOCK reads) used to
    * estimate the average row size for task segmentation.
    *
    * NOTE(review): `count` is kept for interface compatibility; the original
    * implementation derived an unused random offset from it (dead code, removed).
    * The same sample query is intentionally issued on every iteration.
    */
  def getSampleDF(kc: KhaosContext, prop: Properties, count: Long, url: String): DataFrame = {
    val sampleSql = s"(select * from ${oracleConfig.db_name}.${oracleConfig.table_name} SAMPLE BLOCK (10) WHERE ROWNUM <= 10) oracleSample"
    (0 to 9).map(_ => kc.sparkSession.read.jdbc(url, sampleSql, prop)).reduce(_ union _)
  }

  /**
    * Generate one paged SQL per segment. Each page selects rows with
    * begin < rownum <= begin + partitionNum from the base query; the last page
    * is bounded by the total count so no trailing rows are lost.
    *
    * @param baseSql non-wrapped "select ..., rownum r from ..." statement
    * @param count   total row count of the (filtered) table
    * @return one wrapped page query per segment
    */
  def getPagingSqls(kc: KhaosContext, baseSql: String, count: Long, filter: String, sampleData: DataFrame): ArrayBuffer[String] = {
    val sqls = new ArrayBuffer[String]
    // Segment count: estimated from the sampled row size in
    // "split_with_estimate" mode, otherwise the configured max executor count.
    var segmentations = 2
    if (oracleConfig.advanced_options.split_mode.nonEmpty && _splite_mode.on_off.equals(true) &&
        _splite_mode.split_options.exists(_.equalsIgnoreCase("split_with_estimate"))) {
      log.info("高级功能切分设置打开，按估算切分模式开启...")
      segmentations = DataframeUtils.estimateTaskSegmentation(count, sampleData, _jdbc_sharding_size)
    } else {
      log.info("高级功能切分设置关闭，按默认资源数切分模式开启...")
      segmentations = kc.sparkSession.conf.get("spark.dynamicAllocation.maxExecutors", "2").toInt
    }
    logInfo("segmentations: " + segmentations)
    // Fix: divide as Double — the former `count / segmentations` was integer
    // division, so the surrounding Math.ceil was a no-op.
    val partitionNum = Math.ceil(count.toDouble / segmentations).toInt
    logInfo("partitionNum: " + partitionNum)
    for (step <- 0 until segmentations) {
      val begin = step * partitionNum
      val isLast = step == segmentations - 1
      // baseSql ends without a WHERE when filter is blank, so the rownum bound
      // is attached with "where" in that case and with "and" otherwise.
      val sql =
        if (StringUtils.isBlank(filter)) {
          if (isLast) s"(select $selectedFields from ( $baseSql where rownum <=$count)s where r>$begin) tmp"
          else s"(select $selectedFields from ( $baseSql where rownum <=($begin+$partitionNum))s where r>$begin) tmp"
        } else {
          if (isLast) s"(select $selectedFields from ( $baseSql  and rownum <=$count)s where r>$begin) tmp"
          else s"(select $selectedFields from ( $baseSql  and rownum <=($begin+$partitionNum))s where r>$begin) tmp"
        }
      sqls += sql
      log.info(s"Paging Sql: $step => $sql")
    }
    sqls
  }

  /** Build the base data query including the `rownum r` column used for paging. */
  def getTableDataSql(tblName: String, filter: String): String = {
    if (StringUtils.isBlank(filter)) s"select $selectedFields,rownum r from $tblName"
    else s"select $selectedFields,rownum r from $tblName where $filter"
  }

  /** Plain single-partition read of the wrapped table query. */
  def getRealTable(kc: KhaosContext, dbName: String, tblName: String, metaClazz: String, metaParamsJson: String, url: String, tableQuery: String, prop: Properties,
                   selectedFields: String, filter: String): DataFrame = {
    kc.sparkSession.read.jdbc(url, tableQuery, prop)
  }

  /** Build the wrapped select query handed to jdbc() as the "table" argument. */
  def generateTableQuery(dbName: String, tblName: String, selectedFields: String, filter: String): String = {
    if (filter != null && !filter.trim.equals(""))
      s"(select $selectedFields from $dbName.$tblName where $filter) OracleSourceTmp"
    else
      s"(select $selectedFields from $dbName.$tblName) OracleSourceTmp"
  }

  /** Build the min/max bound query for the advanced field-split mode. */
  def generateFieldBoundQuery(dbName: String, tblName: String, splitField: String, filter: String): String = {
    if (filter != null && !filter.trim.equals(""))
      s"(select max($splitField),min($splitField) from $dbName.$tblName where $filter) OracleFieldBoundTmp"
    else
      s"(select max($splitField),min($splitField) from $dbName.$tblName) OracleFieldBoundTmp"
  }

  /** Parse the module config, load module properties and resolve datasource metadata. */
  def init(kc: KhaosContext, config: String): Unit = {
    implicit val formats = DefaultFormats
    oracleConfig = parse(config, true).extract[OracleConfig]

    // Inject the scheduler's project_id into the extender meta params before
    // serializing them for the metadata lookup.
    val PROJECT_ID: Int = kc.conf.getString(SchedulerConstants.PROJECT_ID).toInt
    val metaParamsMap: Map[String, Any] =
      oracleConfig.extender.meta.params.values.updated("project_id", PROJECT_ID)
    import org.json4s.native.Json
    val metaJson: String = Json(DefaultFormats).write(metaParamsMap)

    // Load module.oracle.source.* properties from the khaos configuration.
    loadProperties(kc)

    // Resolve the Oracle datasource (connection) metadata.
    oracleMeta = MetaUtils.getOracleDs(kc,
      oracleConfig.db_name,
      oracleConfig.table_name,
      oracleConfig.extender.meta.clazz,
      metaJson,
      this)
  }

  /**
    * Load the module.oracle.source.* properties into the instance fields
    * (sharding size, db-name switch, jdbc fetch size).
    */
  def loadProperties(kc: KhaosContext): Unit = {
    val oracleProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.oracle.source.").toMap
    log.info("OracleSource Properties")
    oracleProperties.foreach { case (k, v) => log.info(k + "   " + v) }
    _jdbc_sharding_size = oracleProperties.getOrElse(OracleConstants.MODULE_ORACLE_SOURCE_SHARDING_SIZE, "600").toInt
    _dbname_type_switch = oracleProperties.getOrElse(OracleConstants.MODULE_ORACLE_SOURCESINK_DBNAMETYPE_SWITCH, "false").toBoolean
    _jdbc_fetch_size = oracleProperties.getOrElse(OracleConstants.MODULE_ORACLE_SOURCE_JDBC_FETCHSIZE, "1000")
  }

  /**
    * Build the Oracle jdbc url for the given connect type. `host` carries
    * "ip:port" (a comma-separated list of them for RAC).
    *
    * @return the jdbc url, or null for an unknown connect type (original behavior)
    */
  def getOracleConnectTypeURL(host: String, instanceName: String, connectType: String): String = {
    connectType match {
      case "SID" =>
        s"jdbc:oracle:thin:@$host:$instanceName"
      case "ServiceName" =>
        s"jdbc:oracle:thin:@//$host/$instanceName"
      case "RAC" =>
        // One (ADDRESS=(PROTOCOL=TCP)(HOST=x.x.x.x)(PORT=1521)) entry per host.
        val address = host.split(",").map { hostPort =>
          val parts = hostPort.split(":")
          s"(ADDRESS=(PROTOCOL=TCP)(HOST=${parts(0)})(PORT=${parts(1)}))"
        }.mkString
        // jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=...)(LOAD_BALANCE=yes)(FAILOVER=ON)(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=db.domain)))
        s"jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS_LIST=${address})(LOAD_BALANCE=yes)(FAILOVER=ON)(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=$instanceName)))"
      case _ => null
    }
  }

  /**
    * Validate the advanced-split parameters.
    *
    * @param splitField    split column; must be one of extractFields with NUMBER type
    * @param splitNums     number of partitions; must parse to an int in [1, 1000]
    * @param extractFields source table field descriptors
    * @return true when both parameters are usable for a partitioned read
    */
  def checkSplitParams(splitField: String, splitNums: String, extractFields: List[ExtractFieldInfo]): Boolean = {
    var result = false
    if ((splitField != null && !splitField.trim.equals("")) && (splitNums != null && !splitNums.trim.equals(""))) {
      try {
        for (ef <- extractFields) {
          if (splitField.equals(ef.field) && ef.data_type.equals("NUMBER")) {
            if (1 <= splitNums.toInt && splitNums.toInt <= 1000) {
              logInfo(s"切分参数校验通过！切分数量为：$splitNums，切分字段为：$splitField")
              result = true
            } else {
              log.error(s"切分数量输入有误，目前只支持1到1000的整数！实际输入数量为：$splitNums")
            }
          }
        }
        // NOTE(review): this message is also emitted when the field matched but
        // split_nums was out of range; kept for backward-compatible logging.
        if (!result) {
          log.error(s"切分字段输入有误，目前只支持整型字段！实际输入字段为：$splitField")
        }
      } catch {
        case e: Exception => {
          log.error(s"切分字段或切分数量输入有误，切分字段目前只支持整型字段，切分数量只支持1到1000的整数！实际输入数量为：$splitNums，实际输入字段为：$splitField")
        }
      }
    }
    result
  }

  /**
    * Cast every non-Long column of the min/max bound DataFrame to LongType so
    * the values can be used as lower/upper bounds of the partitioned jdbc()
    * read. Oracle returns the bounds as DECIMAL(38,10); MySQL may return
    * IntegerType, LongType or DECIMAL(20,0).
    *
    * @param df field min/max bound DataFrame of varying numeric types
    * @return the same data with all bound columns cast to Long
    */
  def getLongBoundDF(df: DataFrame): DataFrame = {
    // Columns needing conversion: everything that is not already LongType.
    // (The former "|| DecimalType(38,10)" disjunct was redundant — a decimal
    // column is never LongType — and the foreach result was bound to an unused val.)
    val colsToCast = df.schema.collect { case s if !s.dataType.equals(LongType) => s.name }
    colsToCast.foldLeft(df)((acc, name) => acc.withColumn(name, col(name).cast(LongType)))
  }

  /**
    * Build the output schema straight from the configured extract_fields,
    * preserving their declared order and data types.
    */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] = {
    implicit val formats = DefaultFormats
    val oracleInfo = parse(config, true).extract[OracleConfig]
    oracleInfo.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
  }
}