package com.kingsoft.dc.khaos.module.spark.source

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.ds.PGSqlConnect
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants._
import com.kingsoft.dc.khaos.module.spark.metadata.source.PGSqlSourceConfig
import com.kingsoft.dc.khaos.module.spark.model.MetaDataEntity
import com.kingsoft.dc.khaos.module.spark.util.MetaUtils
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import java.sql.{Connection, Date, DriverManager, ResultSet, Timestamp}
import scala.collection.mutable.ArrayBuffer

/**
 * Created by wuxiang on 2021/11/22 11:13
 */
class PGSqlSource extends SourceStrategy with Logging with Serializable {
  // Table metadata (including the connection descriptor) resolved through
  // MetaUtils in initPGSqlConnect().
  private var _pgsql_Meta: MetaDataEntity = _
  // Parsed module configuration; assigned in source() before any other method runs.
  private var _pgsql_Config: PGSqlSourceConfig = null

  private var _module_id: String = ""

  // Connection settings, populated from _pgsql_Meta.dsPGSqlConnect in initPGSqlConnect().
  private var host: String = ""
  private var port: String = ""
  private var username: String = ""
  private var password: String = ""
  private var instanceName: String = ""
  // Quoted schema / table names, set in source().
  private var dbName = ""
  private var tblName = ""
  // Page size (rows per limit/offset task); overridable via module.pgsql.source.* properties.
  private var taskNumber: Long = 1000000
  // JDBC statement query timeout and driver login timeout (units per JDBC: seconds).
  private var queryTimeOut: Int = 6000
  private var loginTimeout: Int = 6000

  /**
   * Data extraction entry point: parses the JSON module config, runs
   * initialization (properties, permission check, connection metadata) and reads
   * the configured table into a DataFrame.
   *
   * @param kc         Khaos execution context
   * @param module_id  id of the module instance being executed
   * @param config     JSON config string, deserialized into [[PGSqlSourceConfig]]
   * @param dependence upstream dependency (not used by this source)
   * @return DataFrame holding the extracted rows
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {

    implicit val formats = DefaultFormats
    val pgSqlConfig: PGSqlSourceConfig = parse(config, true).extract[PGSqlSourceConfig]
    this._pgsql_Config = pgSqlConfig
    this._module_id = module_id
    // Quote the schema and table names so PostgreSQL preserves case/special characters.
    dbName = s"""\"${_pgsql_Config.db_name}\""""
    tblName = s"""\"${_pgsql_Config.table_name}\""""
    val filter = _pgsql_Config.filter

    init(kc)
    val df: DataFrame = readTaskPGSql(kc, dbName, tblName, filter)
    df
  }

  /**
   * Loads tuning properties (page size, query timeout, login timeout) from the
   * "module.pgsql.source." prefix of the context configuration. On any failure
   * the field defaults are kept and an error is logged.
   */
  def loadProperties(kc: KhaosContext): Unit = {
    try {
      val pgSqlProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.pgsql.source.").toMap
      log.info("PGSqlSource Properties")
      pgSqlProperties.foreach { case (k, v) => log.info(k + "   " + v) }
      // Temporary: reuses the GP (Greenplum/MPP) property keys for PostgreSQL.
      taskNumber = pgSqlProperties.getOrElse(MppConstants.MODULE_GP_SOURCE_READ_NUMS, MppConstants.DEFAULT_READ_NUMS).toLong
      queryTimeOut = pgSqlProperties.getOrElse(MppConstants.MODULE_GP_SOURCE_QUERY_TIMEOUT, MppConstants.DEFAULT_QUERY_TIMEOUT).toInt
      loginTimeout = pgSqlProperties.getOrElse(MppConstants.MODULE_GP_SOURCE_LOGIN_TIMEOUT, MppConstants.DEFAULT_LOGIN_TIMEOUT).toInt
    } catch {
      case e: Exception =>
        // Message: "PGSql configuration not found! Falling back to defaults".
        // NOTE(review): the caught exception `e` is not logged — stack trace is lost.
        log.error("未读取到PGSql配置! 改用默认配置")
    }
  }

  /**
   * Reads the table and builds a DataFrame: counts the rows, splits the read
   * into limit/offset-paged SQL statements (one per Spark partition), executes
   * each page over raw JDBC on the executors, and converts the string-encoded
   * rows back into typed Rows.
   *
   * @param kc         Khaos execution context
   * @param db_name    quoted schema name
   * @param table_name quoted table name
   * @param filter     optional WHERE clause body ("" or null when absent)
   * @return DataFrame with the schema derived from extract_fields
   */
  def readTaskPGSql(kc: KhaosContext,
                    db_name: String,
                    table_name: String,
                    filter: String): DataFrame = {
    val count = queryNumbers(kc, db_name, table_name, filter)
    val arrSql = divideTask(count, db_name, table_name, filter)

    // At least one partition even when divideTask produced no statements.
    var repartitions = 0
    if (arrSql.length == 0) {
      repartitions = 1
    } else {
      repartitions = arrSql.length
    }

    // Build one task element per paged SQL statement.
    // NOTE(review): repartition() shuffles elements, so a partition is not
    // guaranteed to hold exactly one SQL string; the index-based broadcast
    // lookup below assumes it does — verify (makeRDD(arrSql, arrSql.length)
    // would pin one element per partition).
    val taskRdd = kc.sparkSession
      .sparkContext
      .makeRDD(arrSql)
      .repartition(repartitions)

    log.info("分区数: " + repartitions)
    val columnArray = new ArrayBuffer[Column]()
    // StructField list used to build the DataFrame schema.
    val schemaArr = new ArrayBuffer[StructField]()

    // Broadcast the SQL statements so each executor can look up its own page.
    val sqlArrBroadcast = kc.sparkSession.sparkContext.broadcast(arrSql)
    for (elem <- _pgsql_Config.extract_fields) {
      schemaArr.append(StructField(elem.field, getDataType(elem.data_type), true))
      columnArray += col(elem.field)
    }
    // Build the StructType from the configured field names and mapped types.
    val schema: StructType = StructType(schemaArr.toArray)
    val res = taskRdd.mapPartitionsWithIndex((index, partition) => {
      var conn: Connection = null
      if (null == conn) {
        conn = getConnect()
      }
      // Pick the SQL assigned to this partition by partition index.
      val sql = sqlArrBroadcast.value(index)
      log.info("run sql: " + sql)
      val dataArr = new ArrayBuffer[String]()
      val statement = conn.createStatement()
      statement.setQueryTimeout(queryTimeOut)
      val resultSet: ResultSet = statement.executeQuery(sql)
      // Encode each row as a single string joined by the "^#&$^" sentinel,
      // reading every column as a string (typed parsing happens in rdd2Row).
      while (resultSet.next()) {
        var res: Row = Row()

        for (elem <- _pgsql_Config.extract_fields) {
          val field_name = elem.field
          res = Row.merge(res, Row(resultSet.getString(field_name)))
        }
        dataArr += res.mkString("^#&$^")
      }
      // NOTE(review): statement/resultSet are never closed, and conn.close() is
      // skipped entirely if executeQuery throws — consider try/finally.
      conn.close()
      // NOTE(review): dataArr is emitted once per element in this partition; a
      // partition holding 0 or >1 task elements would drop or duplicate the page.
      partition.flatMap(row => dataArr)
    })
    // Decode the sentinel-joined strings into typed Rows (split with -1 limit so
    // trailing empty columns are preserved).
    val rowRdd = res.map(row => {
      val attributes = row.split("\\^\\#\\&\\$\\^", -1)
      rdd2Row(schema, attributes)
    })
    val df = kc.sparkSession.createDataFrame(rowRdd, schema)
    df
  }

  /*
    Builds a typed Row from the string-encoded column values, converting each
    value according to the matching schema field type. The literal markers
    MetaDataConstants.NULL / NULLS are mapped to SQL null for every type.
    Assumes `attributes` has the same order and arity as `schema.fields`.
   */
  def rdd2Row(schema: StructType, attributes: Array[String]) = {
    var row: Row = Row()
    // log.info("StructType is: " + schema.fields.mkString("&"))
    for (i <- 0 until schema.fields.length) {
      // The metadata schema and the encoded values share order and count.
      // NOTE(review): .toList(i) rebuilds the List every iteration (O(n) per
      // access); schema.fields(i) would index the array directly.
      val colType = schema.fields.toList(i).dataType
      // NOTE(review): this logs once per column per row on executors — very noisy.
      log.info("colType is: " + colType.simpleString)
      colType match {
        case StringType => {
          if (MetaDataConstants.NULL == attributes(i) || MetaDataConstants.NULLS == attributes(i)) {
            row = Row.merge(row, Row(null))
          } else {
            row = Row.merge(row, Row(attributes(i)))
          }
        }
        case LongType => {
          if (MetaDataConstants.NULL == attributes(i) || MetaDataConstants.NULLS == attributes(i)) {
            // The data contains NULL values
            row = Row.merge(row, Row(null))
          } else {
            row = Row.merge(row, Row(attributes(i).toLong))
          }
        }
        case IntegerType => {
          if (MetaDataConstants.NULL == attributes(i) || MetaDataConstants.NULLS == attributes(i)) {
            // The data contains NULL values
            row = Row.merge(row, Row(null))
          } else {
            row = Row.merge(row, Row(attributes(i).toInt))
          }
        }
        case DateType => {
          if (MetaDataConstants.NULL == attributes(i) || MetaDataConstants.NULLS == attributes(i)) {
            row = Row.merge(row, Row(null))
          } else {
            //  row = Row.merge(row, Row(attributes(i)))
            row = Row.merge(row, Row(Date.valueOf(attributes(i))))
          }
        }
        case TimestampType => {
          // log.info("timestamp is: " + attributes(i).mkString("###"))
          if (MetaDataConstants.NULL == attributes(i) || MetaDataConstants.NULLS == attributes(i)) {
            row = Row.merge(row, Row(null))
          } else {
            // Values like "yyyy-MM-dd HH:mm:ss+08" carry a zone offset that
            // Timestamp.valueOf cannot parse; keep only the local-time part.
            if (attributes(i).contains("+")) {
              // log.info("timestamp if is: " + attributes(i).mkString("###"))
              val colArr = attributes(i).split("\\+", -1)
              if (colArr.length > 1) {
                row = Row.merge(row, Row(Timestamp.valueOf(colArr(0))))
              } else {
                row = Row.merge(row, Row(Timestamp.valueOf(attributes(i))))
              }
            } else {
              //log.info("timestamp else is: " + attributes(i).mkString("###"))
              row = Row.merge(row, Row(Timestamp.valueOf(attributes(i))))
            }
          }
        }
        case FloatType => {
          if (MetaDataConstants.NULL == attributes(i)
            || MetaDataConstants.NULLS == attributes(i)) {
            row = Row.merge(row, Row(null))
          } else {
            row = Row.merge(row, Row(attributes(i).toFloat))
          }
        }
        case DoubleType => {
          if (MetaDataConstants.NULL == attributes(i) || MetaDataConstants.NULLS == attributes(i)) {
            row = Row.merge(row, Row(null))
          } else {
            row = Row.merge(row, Row(attributes(i).toDouble))
          }
        }
        case NullType => row = {
          Row.merge(row, Row(attributes(i)))
        }
        // Any other type keeps the raw string value.
        case _ => {
          row = Row.merge(row, Row(attributes(i)))
        }
      }
    }
    row
  }

  /*
   Opens a JDBC connection to PostgreSQL with the credentials resolved in
   initPGSqlConnect(). Runs on executors, hence the explicit driver load.
   */
  def getConnect() = {
    DriverManager.setLoginTimeout(loginTimeout)
    Class.forName("org.postgresql.Driver")
    val conn = DriverManager.getConnection(getConnectUrl, username, password)
    conn
  }

  /*
   Maps a Khaos column-type name (case-insensitive) to the corresponding Spark
   SQL DataType; unknown types fall back to StringType.
   */
  def getDataType(dataType: String): DataType = {
    var value: DataType = null
    value = dataType.toUpperCase match {
      case ColumnType.STRING => DataTypes.StringType
      case ColumnType.NUMBER => DataTypes.LongType
      case ColumnType.DATE => DataTypes.DateType
      case ColumnType.DECIMAL => DataTypes.DoubleType
      case ColumnType.TIME => DataTypes.StringType
      case ColumnType.DATETIME => DataTypes.TimestampType
      case "SERIAL4" => DataTypes.IntegerType
      case "SERIAL8" => DataTypes.LongType
      case _ => DataTypes.StringType
    }
    value
  }

  /**
   * Counts the rows to be read, via a Spark JDBC subquery
   * (select count(*) ... aliased with a timestamped name to avoid clashes).
   *
   * @param kc       Khaos execution context
   * @param dbschema quoted schema name
   * @param dbtable  quoted table name
   * @param filter   optional WHERE clause body ("" or null when absent)
   * @return total row count
   */
  def queryNumbers(kc: KhaosContext,
                   dbschema: String,
                   dbtable: String,
                   filter: String) = {
    log.info("Start Reading PGSql Database!")
    val schemaAndTable = s"${dbschema}.${dbtable}"
    // Build the count subquery, with the filter applied when present.
    var count_sql = ""
    if (filter != "" && null != filter) {
      count_sql = s"(select count(*) from ${schemaAndTable} where ${filter}) pgsql_tmp_count_" + System.currentTimeMillis()
    } else {
      count_sql = s"(select count(*) from ${schemaAndTable}) pgsql_tmp_count_" + System.currentTimeMillis()
    }
    val gscReadOptionMap = Map.apply(
      "url" -> getConnectUrl(),
      "user" -> username,
      "driver" -> "org.postgresql.Driver",
      "password" -> password,
      "dbschema" -> dbschema
    )
    val countdf = kc.sparkSession.read.format("jdbc")
      .options(gscReadOptionMap)
      .option("dbtable", count_sql)
      .load()
    // Paged reads of the pgsql database happen later in divideTask; (original
    // comment said 50,000 rows per page — actual page size is taskNumber).
    val count = countdf.select("count").first().getAs[Long]("count")
    log.info("count ==> " + count)
    count
  }

  /**
   * Splits the read into limit/offset-paged SQL statements based on the row
   * count, one statement per taskNumber rows. An empty table still yields a
   * single (empty-result) statement so downstream always has at least one task.
   *
   * @param count    total row count
   * @param dbschema quoted schema name
   * @param dbtable  quoted table name
   * @param filter   optional WHERE clause body ("" or null when absent)
   * @return array of SQL statements
   */
  def divideTask(count: Long,
                 dbschema: String,
                 dbtable: String,
                 filter: String) = {
    val sqlArr = new ArrayBuffer[String]()
    if (count != 0) {
      var tmpNums = 0l
      while (tmpNums < count) {
        if (filter != "" && null != filter) {
          sqlArr += s"select * from ${dbschema}.${dbtable} where ${filter} limit ${taskNumber} offset ${tmpNums}"
        } else {
          sqlArr += s"select * from ${dbschema}.${dbtable} limit ${taskNumber} offset ${tmpNums}"
        }
        tmpNums += taskNumber
      }
    } else {
      val tmpNums = 0l
      if (filter != "" && null != filter) {
        sqlArr += s"select * from ${dbschema}.${dbtable} where ${filter} limit ${taskNumber} offset ${tmpNums}"
      } else {
        sqlArr += s"select * from ${dbschema}.${dbtable} limit ${taskNumber} offset ${tmpNums}"
      }
    }

    sqlArr.toArray
  }


  /** Initializes runtime state: properties, read permission, connection metadata. */
  def init(kc: KhaosContext): Unit = {
    implicit val formats: DefaultFormats.type = DefaultFormats

    // Load tuning parameters from the context configuration.
    loadProperties(kc)

    // Permission check (throws when read access is denied).
    checkReadRight(kc)

    // Resolve the pgsql connection metadata (host/port/credentials).
    initPGSqlConnect(kc)

  }


  /**
   * Verifies read permission on the configured table via the configured auth
   * extender; throws when the check fails.
   *
   * @param kc Khaos execution context
   */
  def checkReadRight(kc: KhaosContext): Unit = {
    val checkResult: Boolean = MetaUtils.checkReadAuth(kc,
      _pgsql_Config.db_name,
      _pgsql_Config.table_name,
      _pgsql_Config.extender.auth.clazz,
      compact(render(_pgsql_Config.extender.auth.params)))
    if (!checkResult) {
      // Message: "pgsql reader init failed, permission check did not pass!"
      log.error(s"pgsql reader init failed, 权限验证未通过!")
      throw new Exception(s"pgsql reader init failed, 权限验证未通过!")
    }
  }

  /**
   * Resolves the table metadata through the configured meta extender and copies
   * the connection settings (host/port/user/password/instance) into the fields
   * used by getConnect()/getConnectUrl().
   */
  def initPGSqlConnect(kc: KhaosContext): Unit = {
    _pgsql_Meta = MetaUtils.getPGSqlMeta(kc,
      _pgsql_Config.db_name,
      _pgsql_Config.table_name,
      _pgsql_Config.extender.meta.clazz,
      compact(render(_pgsql_Config.extender.meta.params)),
      this)

    val connect: PGSqlConnect = _pgsql_Meta.dsPGSqlConnect
    instanceName = connect.getInstanceName
    host = connect.getHost
    port = connect.getPort
    username = connect.getUserName
    password = connect.getPassWord
  }


  /**
   * Builds the JDBC connection URL from the resolved host/port/instance.
   *
   * @return jdbc:postgresql://host:port/instance
   */
  def getConnectUrl(): String = {
    val url = s"jdbc:postgresql://${host}:${port}/${instanceName}"
    log.info("url ==> " + url)
    url.trim
  }


  /** Returns the upstream schema as declared by extract_fields in the config. */
  override def schema(dc: KhaosContext,
                      config: String,
                      dependence: Dependency): List[KhaosStructField] = {
    val fieldSchema = ArrayBuffer[KhaosStructField]()
    implicit val formats = DefaultFormats
    val pgInfo = parse(config, true).extract[PGSqlSourceConfig]
    val extrFields = pgInfo.extract_fields
    for (ef <- extrFields) {
      fieldSchema += KhaosStructField(ef.field, ef.data_type)
    }
    fieldSchema.toList
  }
}
