package com.kingsoft.dc.khaos.module.spark.source

import java.util
import java.util.{Properties, UUID}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.api.DmTableSplit
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, CommonConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.constants.MppConstants.{MODULE_GP_SOURCE_JDBC_OPTIONS, MODULE_GP_SOURCE_JDBC_URL_PARAM}
import com.kingsoft.dc.khaos.module.spark.metadata.source.{ExtractFieldInfo, GreenPlumSourceConfig, RdbmsAdvancedOption, SplitOption}
import com.kingsoft.dc.khaos.module.spark.util.TableSplitUtils
import com.kingsoft.dc.khaos.module.spark.util.TableSplitUtils.StrategyTypeEnum._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions.JDBC_DRIVER_CLASS
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._
import scala.util.Random

/**
 * Created by haorenhui on 2021/08/17.
 *
 * GreenPlum source strategy (v2).
 *
 * Reads a GreenPlum table through the PostgreSQL JDBC driver. To parallelise the
 * extraction, one sub-query is generated per GreenPlum segment (filtered on the
 * system column gp_segment_id) and the per-segment DataFrames are unioned by
 * column name. Split (sharded) tables are supported via three strategies:
 * enumerated values (CUSTOM_ENUM), business-date suffix (DATETIME) and a
 * user-configured business suffix (BUSSINESS).
 */
class GreenPlumSourceV2 extends SourceStrategy with Logging with Serializable {

    // Parsed job configuration; assigned exactly once in init() before any read happens.
    var gpConfig: GreenPlumSourceConfig = _
    // JDBC connection properties, filled in initProperties().
    // NOTE(review): holds the plain-text password — never dump it verbatim to logs
    // (doRead masks it when printing the properties).
    var jdbcProperties = new Properties()

    /**
     * Entry point of the strategy: extracts the configured table as a DataFrame.
     *
     * `module_id` and `dependence` belong to the SourceStrategy contract but are
     * not used by this implementation.
     */
    def source(kc: KhaosContext,
               module_id: String,
               config: String,
               dependence: Dependency,
               dsConfig: Map[String, String]): DataFrame = {
        initProperties(kc, dsConfig)
        val data: DataFrame = read(kc, init(kc, config))
        castColumn(data)
    }

    /**
     * Parses the JSON config into [[gpConfig]] and looks up the table-split metadata.
     *
     * @return the split descriptor, or null when splitting is not enabled for the table
     */
    def init(kc: KhaosContext, config: String): DmTableSplit = {
        implicit val formats: DefaultFormats.type = DefaultFormats
        gpConfig = parse(config, useBigDecimalForDouble = true).extract[GreenPlumSourceConfig]
        // Query the split-table metadata through the configured meta extender.
        TableSplitUtils.getTableSplit(
            kc,
            gpConfig.db_name,
            gpConfig.table_name,
            gpConfig.extender.meta.clazz,
            compact(render(gpConfig.extender.meta.params)))
    }

    /** Initialises the JDBC connection properties from the datasource config. */
    def initProperties(kc: KhaosContext, dsConfig: Map[String, String]): Unit = {
        log.info("initProperties start ")
        val host: String = dsConfig("host")
        val port: String = dsConfig("port")
        val instanseName: String = dsConfig("instansename")

        // Optional extra URL parameters (e.g. "?ssl=true"), appended verbatim to the URL.
        val jdbcUrlParam: String = kc.conf.getString(MODULE_GP_SOURCE_JDBC_URL_PARAM, "")
        jdbcProperties.put(JDBCOptions.JDBC_URL, s"jdbc:postgresql://$host:$port/$instanseName$jdbcUrlParam")
        jdbcProperties.put(JDBCOptions.JDBC_DRIVER_CLASS, "org.postgresql.Driver")
        jdbcProperties.put(JDBCOptions.JDBC_BATCH_FETCH_SIZE, "1000") // default 1000
        jdbcProperties.put("user", dsConfig("username"))
        jdbcProperties.put("password", dsConfig("password"))

        // User-defined options may override or extend the defaults above.
        val userDefinedJdbcProp: Seq[(String, String)] = kc.conf.getAllWithUnPrefix(MODULE_GP_SOURCE_JDBC_OPTIONS)
        userDefinedJdbcProp.foreach { case (key, value) => jdbcProperties.put(key, value) }
        log.info("initProperties end ")
    }

    /**
     * Reads the table, resolving physical sub-tables according to the split strategy.
     *
     * @param splitTableInfo split metadata; null means splitting is disabled
     */
    def read(kc: KhaosContext, splitTableInfo: DmTableSplit): DataFrame = {
        if (splitTableInfo == null) {
            // Splitting disabled: read the configured table directly.
            doRead(kc, gpConfig.db_name, gpConfig.table_name, gpConfig.filter, jdbcProperties)
        } else {
            val strategyType: String = splitTableInfo.getStrategyType
            strategyType match {
                // Enumerated split: union every physical sub-table matched by the enum values.
                case CUSTOM_ENUM =>
                    val splitValues: List[String] = splitTableInfo.getStrategyValue.split(",").toList
                    val tblNameList: List[String] = TableSplitUtils.getRealTable(kc,
                        gpConfig.db_name,
                        gpConfig.table_name,
                        gpConfig.extender.meta.clazz,
                        compact(render(gpConfig.extender.meta.params)), this, splitTableInfo, "in", splitValues)
                    // reduceOption keeps the legacy "empty DataFrame when no sub-table matches"
                    // behaviour and — unlike the former indexOf() bookkeeping — is O(n) and stays
                    // correct when two sub-tables share the same name.
                    tblNameList
                        .map((tableName: String) => doRead(kc, gpConfig.db_name, tableName, gpConfig.filter, jdbcProperties))
                        .reduceOption((left: DataFrame, right: DataFrame) => left.unionByName(right))
                        .getOrElse(kc.sparkSession.emptyDataFrame)
                // Datetime split: resolve the single sub-table matching the job's business date.
                case DATETIME =>
                    val jobBizDate: String = kc.conf.getString(SchedulerConstants.BIZ_DATE)
                    val splitTime: String = splitTableInfo.getStrategyValue match {
                        case TableSplitUtils.StrategyValueEnum.year => jobBizDate.substring(0, 4)
                        case TableSplitUtils.StrategyValueEnum.month => jobBizDate.substring(0, 6)
                        case TableSplitUtils.StrategyValueEnum.day => jobBizDate
                        // Fail fast with a descriptive error instead of an opaque MatchError.
                        case other => throw new IllegalArgumentException(s"GreenplumSource:未知的分表类型[$other]")
                    }
                    val tblNameList: List[String] = TableSplitUtils.getRealTable(kc,
                        gpConfig.db_name,
                        gpConfig.table_name,
                        gpConfig.extender.meta.clazz,
                        compact(render(gpConfig.extender.meta.params)), this, splitTableInfo, "=", List(splitTime))
                    if (tblNameList.isEmpty) {
                        throw new IllegalArgumentException(s"GreenplumSource:当前业务日期对应的分表不存在[$jobBizDate]")
                    }
                    doRead(kc, gpConfig.db_name, tblNameList.head, gpConfig.filter, jdbcProperties)
                // Business split: physical table name is <table>___<suffix> from the config.
                case BUSSINESS =>
                    doRead(kc, gpConfig.db_name, resolveBusinessTableName(), gpConfig.filter, jdbcProperties)
                case _ =>
                    throw new IllegalArgumentException(s"GreenplumSource:未知的分表类型[$strategyType]")
            }
        }
    }

    /**
     * Resolves the physical table name for the BUSSINESS strategy: validates the
     * configured suffix and appends it as <table>___<suffix>. Falls back to the
     * plain table name when no sub_table section is configured.
     */
    private def resolveBusinessTableName(): String = {
        gpConfig.sub_table match {
            case Some(subTable) =>
                if (!subTable.on_off.toBoolean) {
                    throw new IllegalArgumentException("未开启业务分表开关!")
                }
                val suffixValue: String = subTable.suffix.trim
                // Triple underscore is the table/suffix separator and must not appear in the suffix itself.
                if (suffixValue.contains("___")) {
                    throw new IllegalArgumentException(s"业务分表后缀禁止包含三个连续下划线:suffixValue[$suffixValue]")
                }
                if (StringUtils.isEmpty(suffixValue)) {
                    throw new IllegalArgumentException(s"未正确填写业务分表后缀:suffixValue[$suffixValue]")
                }
                s"${gpConfig.table_name}___$suffixValue"
            case None => gpConfig.table_name
        }
    }

    /**
     * Reads one physical table: builds one sub-query per GreenPlum segment and
     * unions the per-segment DataFrames by column name.
     */
    def doRead(kc: KhaosContext, dbName: String, tableName: String, filter: String, jdbcProperties: Properties): DataFrame = {
        log.info("doRead start ... ")
        log.info("jdbcProperties ... ")
        // Log the connection properties for troubleshooting, but never leak credentials.
        jdbcProperties.stringPropertyNames().asScala.foreach { key =>
            val shown: String = if ("password".equalsIgnoreCase(key)) "******" else jdbcProperties.getProperty(key)
            log.info(s"$key : $shown")
        }
        val sqlList: List[String] = computeSegmentSql(kc, processName(dbName), processName(tableName), filter, jdbcProperties)
        val session: SparkSession = kc.sparkSession
        val url: String = jdbcProperties.getProperty(JDBCOptions.JDBC_URL)
        // One DataFrame per segment query; reduceOption preserves the original
        // "empty DataFrame when there is nothing to read" behaviour and avoids the
        // former indexOf() bookkeeping (O(n^2), wrong on duplicate sql strings).
        val data: DataFrame = sqlList
            .map { sql =>
                log.info(s"selectSql: $sql")
                session.read.jdbc(url, sql, jdbcProperties)
            }
            .reduceOption((left: DataFrame, right: DataFrame) => left.unionByName(right))
            .getOrElse(session.emptyDataFrame)
        log.info(s"dataSchema: ${data.schema.treeString}")
        data
    }

    /**
     * Builds one SELECT per gp_segment_id so each segment can be read in parallel.
     * An empty table (no segment rows) falls back to a single full-table query so
     * the downstream schema stays consistent.
     *
     * NOTE(review): `where`/`filter` comes straight from job configuration and is
     * concatenated into SQL — it is trusted here, but must never be fed untrusted
     * input (SQL injection).
     */
    def computeSegmentSql(kc: KhaosContext, dbName: String, tableName: String, where: String, jdbcProperties: Properties): List[String] = {
        // Random suffix keeps the generated sub-query aliases unique per invocation.
        val alias: String = String.valueOf(Random.nextInt(100000))
        val whereClause: String = if (StringUtils.isNotEmpty(where)) s" where $where" else ""
        val selectSegmentSql: String =
            s"(select gp_segment_id,count(*) as count from $dbName.$tableName$whereClause group by gp_segment_id) segment_$alias"

        log.info(s"selectSegmentSql: $selectSegmentSql")
        // Query the distinct segment ids; cache only for the show() + collect() below,
        // then unpersist so we do not leak executor memory across reads.
        val segmentInfo: DataFrame = kc.sparkSession.read.jdbc(jdbcProperties.getProperty(JDBCOptions.JDBC_URL), selectSegmentSql, jdbcProperties).cache()
        log.info(s"dataSchema: ${segmentInfo.schema.treeString}")
        segmentInfo.show(1000, truncate = false)
        val segmentIds: Array[Row] = segmentInfo.collect()
        segmentInfo.unpersist()

        val cols: String = gpConfig.extract_fields.map((_: ExtractFieldInfo).field).map(processName).mkString(",")
        // One sub-query per segment id.
        val segmentSqls: List[String] = segmentIds.toList.map { row =>
            val id: Int = row.getAs[Int]("gp_segment_id")
            if (StringUtils.isNotEmpty(where))
                s"(select $cols from $dbName.$tableName where $where and gp_segment_id = $id) segment_${alias}_$id"
            else
                s"(select $cols from $dbName.$tableName where gp_segment_id = $id) segment_${alias}_$id"
        }
        // Empty table: no segment ids — still issue one full-table query.
        val sqlList: List[String] =
            if (segmentSqls.nonEmpty) segmentSqls
            else {
                log.info(s"table:[$tableName] gp_segment_id nums is 0")
                List(s"(select $cols from $dbName.$tableName$whereClause) segment_$alias")
            }
        log.info("selectSqlList:")
        sqlList.foreach(log.info(_: String))
        sqlList
    }

    /**
     * Casts every TIME-typed extract field from TimestampType to StringType and
     * keeps only the time-of-day part:
     * "1970-01-01 12:00:00" -> "12:00:00" (characters 12..19, 1-based substr).
     */
    def castColumn(data: DataFrame): DataFrame = {
        val columns: List[Column] = gpConfig.extract_fields.map { fieldInfo =>
            val field: String = fieldInfo.field
            if (ColumnType.TIME.equalsIgnoreCase(fieldInfo.data_type)) {
                log.info(s"cast $field type : timestamp to string ")
                data.col(field).cast(StringType).substr(12, 8) as field
            } else {
                data.col(field) as field
            }
        }
        val castData: DataFrame = data.select(columns: _*)
        log.info(s"castDataSchema: ${castData.schema.treeString}")
        castData
    }

    /** Returns the upstream schema declared by the extract_fields section of the config. */
    override def schema(dc: KhaosContext,
                        config: String,
                        dependence: Dependency): List[KhaosStructField] = {
        implicit val formats: DefaultFormats.type = DefaultFormats
        val gpInfo: GreenPlumSourceConfig = parse(config, useBigDecimalForDouble = true).extract[GreenPlumSourceConfig]
        gpInfo.extract_fields.map((ef: ExtractFieldInfo) => KhaosStructField(ef.field, ef.data_type))
    }

    /**
     * Double-quotes a database identifier for PostgreSQL/GreenPlum so mixed-case
     * or reserved-word names are preserved: foo -> "foo".
     */
    def processName(str: String): String = {
        s"""\"${str}\""""
    }

}