package com.kingsoft.dc.khaos.module.spark.source

import java.sql.{Connection, Date, DriverManager, ResultSet, Timestamp}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants._
import com.kingsoft.dc.khaos.module.spark.metadata.source.GreenPlumSourceConfig
import com.kingsoft.dc.khaos.module.spark.util.TableSplitUtils
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.functions.{col, substring}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable.ArrayBuffer

/**
  * Source strategy that extracts data from a GreenPlum database (PostgreSQL
  * wire protocol) into a Spark DataFrame, with support for split-table
  * (sharding) strategies: enum-based, date-based and business-suffix tables.
  *
  * Rows are read by paged JDBC queries (limit/offset) executed inside Spark
  * tasks, serialized to delimited strings, then rebuilt into typed Rows.
  *
  * create by yansu on 2019/12/17 17:38
  */
class GreenPlumSource extends SourceStrategy with Logging with Serializable {
  private var _gpConfig: GreenPlumSourceConfig = null

  private var _module_id: String = ""

  // Connection coordinates, populated from ds_config in initMetaData().
  private var _host: String = ""
  private var _port: String = ""
  private var _username: String = ""
  private var _password: String = ""
  private var _instansename: String = ""
  // Double-quoted db/table names (kept for compatibility; set in source()).
  private var dbName = ""
  private var tblName = ""
  // Rows fetched per paging task; overridable via module.greenplum.source.*.
  private var _taskNumber: Long = 1000000
  private var _queryTimeOut: Int = 6000
  private var _loginTimeout: Int = 6000

  /**
    * Entry point: parse the module config, initialize connection metadata and
    * read the (possibly split) table into a DataFrame.
    *
    * @param kc        execution context
    * @param module_id id of this module instance
    * @param config    JSON config, parsed into [[GreenPlumSourceConfig]]
    * @param dependence upstream dependency (unused here)
    * @param ds_config physical datasource properties (host/port/credentials)
    * @return extracted DataFrame
    */
  def source(kc: KhaosContext,
             module_id: String,
             config: String,
             dependence: Dependency,
             ds_config: Map[String, String]): DataFrame = {

    implicit val formats = DefaultFormats
    val gpConfig: GreenPlumSourceConfig = parse(config, true).extract[GreenPlumSourceConfig]
    this._gpConfig = gpConfig
    this._module_id = module_id
    // Quote db/table names (identifier quoting for GreenPlum).
    dbName = s"""\"${_gpConfig.db_name}\""""
    tblName = s"""\"${_gpConfig.table_name}\""""

    initialize(ds_config, kc)
    whetherSplitTable(kc)
  }

  /**
    * Load tunables (page size, query/login timeouts) from context config.
    * Falls back to the defaults on any failure (best-effort by design).
    */
  def loadProperties(kc: KhaosContext): Unit = {
    try {
      val gpProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.greenplum.source.").toMap
      log.info("GreenPlumSource Properties")
      gpProperties.foreach { case (k, v) => log.info(k + "   " + v) }
      _taskNumber = gpProperties.getOrElse(MppConstants.MODULE_GP_SOURCE_READ_NUMS, MppConstants.DEFAULT_READ_NUMS).toLong
      _queryTimeOut = gpProperties.getOrElse(MppConstants.MODULE_GP_SOURCE_QUERY_TIMEOUT, MppConstants.DEFAULT_QUERY_TIMEOUT).toInt
      _loginTimeout = gpProperties.getOrElse(MppConstants.MODULE_GP_SOURCE_LOGIN_TIMEOUT, MppConstants.DEFAULT_LOGIN_TIMEOUT).toInt
    } catch {
      case e: Exception =>
        // Keep the defaults; include the cause in the log for diagnosis.
        log.error("未读取到GP配置! 改用默认配置", e)
    }
  }

  /**
    * Convert TIME-typed columns (read back as Timestamp) to "HH:mm:ss" strings,
    * e.g. "1970-01-01 11:02:12" -> "11:02:12".
    *
    * @param data input DataFrame
    * @return DataFrame with TIME columns re-typed as String
    */
  def processingTimeType(data: DataFrame): DataFrame = {
    // Fold over the configured fields, rewriting only TIME columns.
    _gpConfig.extract_fields.foldLeft(data) { (df, elem) =>
      if (elem.data_type.equalsIgnoreCase("TIME")) {
        df.withColumn(elem.field, substring(df.col(elem.field).cast(StringType), 12, 19).as(elem.field))
      } else {
        df
      }
    }
  }

  /**
    * Resolve whether the configured table is split and read accordingly.
    *
    * @param kc KhaosContext
    * @return DataFrame of the split tables or the main table
    */
  def whetherSplitTable(kc: KhaosContext): DataFrame = {
    getRealTable(
      kc,
      _gpConfig.db_name,
      _gpConfig.table_name,
      _gpConfig.extender.meta.clazz,
      compact(render(_gpConfig.extender.meta.params)),
      _gpConfig.filter)
  }

  /**
    * Split-table resolution: dispatch on the split strategy and read the
    * physical table(s).
    *
    * @param kc             context
    * @param dbName         database (schema) name
    * @param tblName        logical table name (note: shadows the quoted field)
    * @param metaClazz      metadata extender class name
    * @param metaParamsJson metadata extender params as JSON
    * @param filter         row filter predicate (may be null/empty)
    * @return DataFrame
    */
  def getRealTable(kc: KhaosContext,
                   dbName: String,
                   tblName: String,
                   metaClazz: String,
                   metaParamsJson: String,
                   filter: String): DataFrame = {
    val tableSplit: DmTableSplit = TableSplitUtils.getTableSplit(
      kc,
      dbName,
      tblName,
      metaClazz,
      metaParamsJson)

    if (tableSplit == null) {
      // Not split: read the table directly.
      readTaskGP(kc, dbName, tblName, filter)
    } else {
      tableSplit.getStrategyType match {
        // Enum split: union every physical table matching the enum values.
        case TableSplitUtils.StrategyTypeEnum.CUSTOM_ENUM => {
          val splitValues = tableSplit.getStrategyValue.split(",").toList
          val tblNameList = TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz,
            metaParamsJson, this, tableSplit, "in", splitValues)
          // BUG FIX: the previous loop located each frame via indexOf(), which
          // returns the FIRST occurrence — duplicate table names silently
          // reset the accumulated union (and the lookup was O(n^2)).
          val frames = tblNameList.map(tableName => readTaskGP(kc, dbName, tableName, filter))
          if (frames.isEmpty) null else frames.reduce(_ union _)
        }
        // Date split: pick the single table for the job's business date.
        case TableSplitUtils.StrategyTypeEnum.DATETIME => {
          val jobBizDate: String = kc.conf.getString(SchedulerConstants.BIZ_DATE)
          val splitTime = tableSplit.getStrategyValue match {
            case TableSplitUtils.StrategyValueEnum.year => jobBizDate.substring(0, 4)
            case TableSplitUtils.StrategyValueEnum.month => jobBizDate.substring(0, 6)
            case TableSplitUtils.StrategyValueEnum.day => jobBizDate
          }
          val tblNameList = TableSplitUtils.getRealTable(kc, dbName, tblName, metaClazz,
            metaParamsJson, this, tableSplit, "=", scala.List[String](splitTime))
          if (tblNameList.isEmpty) {
            throw new IllegalArgumentException("当前业务日期对应的分表不存在!")
          }
          readTaskGP(kc, dbName, tblNameList.head, filter)
        }
        // Business split: table name is "<logical>___<configured suffix>".
        case TableSplitUtils.StrategyTypeEnum.BUSSINESS => {
          var realTable = _gpConfig.table_name
          if (_gpConfig.sub_table != None) {
            if (_gpConfig.sub_table.get.on_off == "true") {
              val suffixValue = _gpConfig.sub_table.get.suffix.trim
              if (suffixValue.contains("___")) {
                throw new IllegalArgumentException("业务分表后缀禁止包含三个连续下划线!")
              }
              // Null check first (the != "" comparison alone does not reject null).
              if (suffixValue != null && suffixValue != "") {
                realTable = tblName + "___" + suffixValue
              } else {
                throw new IllegalArgumentException("未正确填写业务分表后缀!")
              }
            } else {
              throw new IllegalArgumentException("未开启业务分表开关!")
            }
          }
          readTaskGP(kc, dbName, realTable, filter)
        }
        case _ => {
          throw new IllegalArgumentException("未知的分表类型!")
        }
      }
    }
  }

  /**
    * Read a GreenPlum table through Spark's JDBC reader in one shot.
    *
    * @param kc       context
    * @param dbschema schema (database) name
    * @param dbtable  table name
    * @param filter   optional filter predicate (null/empty to skip)
    * @return DataFrame
    */
  def read4GP(kc: KhaosContext,
              dbschema: String,
              dbtable: String,
              filter: String): DataFrame = {
    log.info("Start Reading GreenPlum Database!")
    val schemaAndTable = s"${dbschema}.${dbtable}"
    val gscReadOptionMap = Map.apply(
      "url" -> getConnectUrl(),
      "user" -> _username,
      "driver" -> "org.postgresql.Driver",
      "password" -> _password,
      "dbschema" -> dbschema,
      "dbtable" -> schemaAndTable
    )
    val gpdf = kc.sparkSession.read.format("jdbc")
      .options(gscReadOptionMap)
      .load()
    // Null check first to avoid relying on comparison order.
    if (filter != null && filter != "") gpdf.filter(filter) else gpdf
  }


  /**
    * Read the table by splitting it into limit/offset paged SQL queries, one
    * per Spark task, and rebuilding a typed DataFrame from the results.
    *
    * @param kc         context
    * @param db_name    schema name
    * @param table_name table name
    * @param filter     optional filter predicate
    * @return DataFrame
    */
  def readTaskGP(kc: KhaosContext,
                 db_name: String,
                 table_name: String,
                 filter: String): DataFrame = {
    val count = queryNumbers(kc, db_name, table_name, filter)
    val arrSql = divideTask(count, db_name, table_name, filter)

    // divideTask always produces at least one sql, but guard anyway.
    val repartitions = if (arrSql.length == 0) 1 else arrSql.length

    // One paging sql per task (best effort: repartition distributes them).
    val taskRdd = kc.sparkSession
      .sparkContext
      .makeRDD(arrSql)
      .repartition(repartitions)

    log.info("分区数: " + repartitions)
    val columnArray = new ArrayBuffer[Column]()
    val schemaArr = new ArrayBuffer[StructField]()
    for (elem <- _gpConfig.extract_fields) {
      schemaArr.append(StructField(elem.field, getDataType(elem.data_type), true))
      columnArray += col(elem.field)
    }
    // StructType built from the configured field names and types.
    val schema: StructType = StructType(schemaArr.toArray)

    // BUG FIX: the previous implementation selected the sql via a broadcast
    // array indexed by partition id and emitted the whole result once per
    // partition ELEMENT; after repartition() the element->partition mapping is
    // not one-to-one, which could duplicate or mis-assign data. Instead run
    // every sql that actually landed in this partition, exactly once, and
    // release the JDBC resources even on failure.
    val res = taskRdd.mapPartitions(partition => {
      val dataArr = new ArrayBuffer[String]()
      if (partition.hasNext) {
        val conn: Connection = getConnect()
        try {
          partition.foreach(sql => {
            log.info("run sql: " + sql)
            val statement = conn.createStatement()
            try {
              statement.setQueryTimeout(_queryTimeOut)
              val resultSet: ResultSet = statement.executeQuery(sql)
              while (resultSet.next()) {
                var rowAcc: Row = Row()
                for (elem <- _gpConfig.extract_fields) {
                  // Everything is read as String here; typing happens in rdd2Row.
                  rowAcc = Row.merge(rowAcc, Row(resultSet.getString(elem.field)))
                }
                dataArr += rowAcc.mkString("^#&$^")
              }
            } finally {
              statement.close()
            }
          })
        } finally {
          conn.close()
        }
      }
      dataArr.iterator
    })
    // Rebuild typed Rows from the delimited strings.
    val rowRdd = res.map(row => {
      val attributes: Array[String] = row.split("\\^\\#\\&\\$\\^", -1)
      rdd2Row(schema, attributes)
    })
    kc.sparkSession.createDataFrame(rowRdd, schema)
  }

  /**
    * Build a typed Row from string attributes, positionally matched to the
    * schema (metadata schema and attribute order/count must agree).
    */
  def rdd2Row(schema: StructType, attributes: Array[String]): Row = {
    // True when the serialized attribute represents a database NULL.
    def isNullToken(s: String): Boolean =
      MetaDataConstants.NULL == s || MetaDataConstants.NULLS == s

    var row: Row = Row()
    for (i <- 0 until schema.fields.length) {
      val attr = attributes(i)
      schema.fields(i).dataType match {
        case StringType =>
          row = Row.merge(row, if (isNullToken(attr)) Row(null) else Row(attr))
        case LongType =>
          row = Row.merge(row, if (isNullToken(attr)) Row(null) else Row(attr.toLong))
        case IntegerType =>
          row = Row.merge(row, if (isNullToken(attr)) Row(null) else Row(attr.toInt))
        case DateType =>
          row = Row.merge(row, if (isNullToken(attr)) Row(null) else Row(Date.valueOf(attr)))
        case TimestampType =>
          if (isNullToken(attr)) {
            row = Row.merge(row, Row(null))
          } else {
            // Strip a trailing "+HH" timezone offset (e.g. "...00:00:00+08"),
            // which Timestamp.valueOf cannot parse.
            // BUG FIX: the original tested contains("\\+") — the literal two
            // characters backslash-plus — so this branch never fired.
            val ts =
              if (attr.contains("+")) Timestamp.valueOf(attr.split("\\+", -1)(0))
              else Timestamp.valueOf(attr)
            row = Row.merge(row, Row(ts))
          }
        case FloatType =>
          row = Row.merge(row, if (isNullToken(attr)) Row(null) else Row(attr.toFloat))
        case DoubleType =>
          row = Row.merge(row, if (isNullToken(attr)) Row(null) else Row(attr.toDouble))
        case _ =>
          // NullType and any unknown type: keep the raw string.
          row = Row.merge(row, Row(attr))
      }
    }
    row
  }

  /**
    * Open a JDBC connection to GreenPlum using the initialized credentials.
    */
  def getConnect() = {
    DriverManager.setLoginTimeout(_loginTimeout)
    Class.forName("org.postgresql.Driver")
    DriverManager.getConnection(getConnectUrl, _username, _password)
  }

  /**
    * Map a logical column type name to the corresponding Spark SQL DataType.
    * Unknown types default to String.
    */
  def getDataType(dataType: String): DataType = {
    dataType.toUpperCase match {
      case ColumnType.STRING => DataTypes.StringType
      case ColumnType.NUMBER => DataTypes.LongType
      case ColumnType.DATE => DataTypes.DateType
      case ColumnType.DECIMAL => DataTypes.DoubleType
      case ColumnType.TIME => DataTypes.StringType
      case ColumnType.DATETIME => DataTypes.TimestampType
      case "SERIAL4" => DataTypes.IntegerType
      case "SERIAL8" => DataTypes.LongType
      case _ => DataTypes.StringType
    }
  }

  /**
    * Count the rows that will be read (honoring the optional filter).
    *
    * @param kc       context
    * @param dbschema schema name
    * @param dbtable  table name
    * @param filter   optional filter predicate
    * @return total row count
    */
  def queryNumbers(kc: KhaosContext,
                   dbschema: String,
                   dbtable: String,
                   filter: String) = {
    log.info("Start Reading GreenPlum Database!")
    val schemaAndTable = s"${dbschema}.${dbtable}"
    // Wrap the count query as a derived table with a unique alias.
    val count_sql =
      if (filter != null && filter != "") {
        s"(select count(*) from ${schemaAndTable} where ${filter}) gp_tmp_count_" + System.currentTimeMillis()
      } else {
        s"(select count(*) from ${schemaAndTable}) gp_tmp_count_" + System.currentTimeMillis()
      }
    val gscReadOptionMap = Map.apply(
      "url" -> getConnectUrl(),
      "user" -> _username,
      "driver" -> "org.postgresql.Driver",
      "password" -> _password,
      "dbschema" -> dbschema
    )
    val countdf = kc.sparkSession.read.format("jdbc")
      .options(gscReadOptionMap)
      .option("dbtable", count_sql)
      .load()
    val count = countdf.select("count").first().getAs[Long]("count")
    log.info("count ==> " + count)
    count
  }

  /**
    * Split the read into limit/offset paging SQLs of _taskNumber rows each.
    * A zero count still yields one (empty-result) sql so downstream code has
    * a schema to work with.
    *
    * @param count    total row count
    * @param dbschema schema name
    * @param dbtable  table name
    * @param filter   optional filter predicate
    * @return array of paging SQL statements
    */
  def divideTask(count: Long,
                 dbschema: String,
                 dbtable: String,
                 filter: String) = {
    val sqlArr = new ArrayBuffer[String]()
    val whereClause = if (filter != null && filter != "") s" where ${filter}" else ""
    var offset = 0L
    // do/while so count == 0 still produces the single offset-0 sql.
    do {
      sqlArr += s"select * from ${dbschema}.${dbtable}${whereClause} limit ${_taskNumber} offset ${offset}"
      offset += _taskNumber
    } while (offset < count)
    sqlArr.toArray
  }

  /**
    * Alternative driver-side paging read: unions one JDBC DataFrame per page.
    * Kept for compatibility; readTaskGP is the task-parallel variant.
    */
  def readLimitGP(kc: KhaosContext,
                  dbschema: String,
                  dbtable: String,
                  filter: String): DataFrame = {
    log.info("Start Reading GreenPlum Database!")
    val schemaAndTable = s"${dbschema}.${dbtable}"
    var offset = 0L
    val count_sql = s"(select count(*) from ${schemaAndTable}) gp_tmp_count"
    val gscReadOptionMap = Map.apply(
      "url" -> getConnectUrl(),
      "user" -> _username,
      "driver" -> "org.postgresql.Driver",
      "password" -> _password,
      "dbschema" -> dbschema
    )
    val countdf = kc.sparkSession.read.format("jdbc")
      .options(gscReadOptionMap)
      .option("dbtable", count_sql)
      .load()

    val count = countdf.select("count").first().getAs[Long]("count")
    // Use the class logger instead of println for consistency.
    log.info("count==>" + count)
    var partitions = (count / _taskNumber).toInt
    log.info("估算分区数: " + partitions)
    if (partitions < 20) {
      partitions = 20
    }
    val first_sql = s"(select * from ${schemaAndTable} limit ${_taskNumber} offset ${offset}) first_tmp"
    var dataFrame: DataFrame = kc.sparkSession.read.format("jdbc")
      .options(gscReadOptionMap)
      .option("dbtable", first_sql)
      .load()
    log.info("first_sql: " + first_sql)
    offset += _taskNumber
    // Keep unioning page-sized frames until the offset passes the row count.
    while (offset <= count) {
      log.info("offset ==> " + offset)
      val times = "gp_tmp_" + System.currentTimeMillis()
      val limit_sql = s"(select * from ${schemaAndTable} limit ${_taskNumber} offset ${offset}) ${times}"
      log.info("limit_sql: " + limit_sql)
      val pageFrame = kc.sparkSession.read.format("jdbc")
        .options(gscReadOptionMap)
        .option("dbtable", limit_sql)
        .load()
      dataFrame = pageFrame.unionByName(dataFrame)
      offset += _taskNumber
    }

    if (filter != null && filter != "") {
      dataFrame = dataFrame.filter(filter)
    }
    log.info("分区数: " + dataFrame.rdd.partitions.length)
    dataFrame
  }

  /**
    * Initialize tunables and physical connection metadata.
    */
  def initialize(ds_config: Map[String, String], khaosContext: KhaosContext) = {
    log.info("Initialize The Metadata!")
    loadProperties(khaosContext)
    initMetaData(ds_config)
  }

  /**
    * Build the JDBC connection url from the initialized host/port/instance.
    *
    * @return jdbc url
    */
  def getConnectUrl(): String = {
    val url = s"jdbc:postgresql://${_host}:${_port}/${_instansename}"
    log.info("url ==> " + url)
    url.trim
  }

  /**
    * Populate the physical connection fields from the datasource properties.
    *
    * @param ds_config physical address parameters (host/port/credentials)
    */
  def initMetaData(ds_config: Map[String, String]) = {
    log.info("Initializes The Physical Address!")
    _host = ds_config("host")
    _port = ds_config("port")
    _username = ds_config("username")
    _password = ds_config("password")
    _instansename = ds_config("instansename")
  }

  /** Expose the upstream schema from the configured extract fields. */
  override def schema(dc: KhaosContext,
                      config: String,
                      dependence: Dependency): List[KhaosStructField] = {
    implicit val formats = DefaultFormats
    val gpInfo = parse(config, true).extract[GreenPlumSourceConfig]
    gpInfo.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type)).toList
  }
}
