package com.kingsoft.dc.khaos.module.spark.sink

import java.sql.DriverManager.getConnection
import java.sql.{Connection, DriverManager, Statement}
import java.util
import java.util.{Properties, Random}

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.extender.meta.model.ds.OracleConnect
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, MetaDataConstants, OracleConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.{ExtractFieldInfo, OracleConfig}
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{Column, DataFrame, SaveMode}
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, render}

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * Created by WANGYING15 on 2019/6/13.
 */
class OracleSink extends SinkStrategy with Logging {

  // Resolved in init(): datasource connection plus table/column metadata entities.
  private var oracleMeta: MetaDataEntity = null
  // Resolved in init(): sink configuration extracted from the module's JSON config.
  private var oracleConfig: OracleConfig = null
  // Spark SaveMode strings / custom mode recognized by this sink.
  private val SINK_MODE_INSERT = "append";
  private val SINK_MODE_OVERWRITE = "overwrite";
  private val SINK_MODE_UPSERT = "upsert";
  // Double-quoted identifiers ("db" / "table") built in init() for case-sensitive Oracle names.
  private var dbName = ""
  private var tblName = ""

  /**
   * Data output entry point.
   *
   * Dispatches on the configured metadata origin: data-management metadata is
   * written by this sink's own write(); db-server metadata is delegated to
   * OracleSinkDbs. Any other origin value is a configuration error.
   *
   * @param kc        job context (configuration, accumulators, reporting hooks)
   * @param module_id id of the pipeline module being executed
   * @param config    module JSON config; parsed into OracleConfig downstream
   * @param schema    inner schema of the incoming data
   * @param dataFrame data to write
   * @return this sink, for chaining
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {
    // NOTE(review): could be a val — never reassigned.
    var meta_origin: String = kc.conf.getString(SchedulerConstants.META_ORIGIN, MetaDataConstants.META_DATAMANAGEMENT)
    meta_origin match {
      case MetaDataConstants.META_DATAMANAGEMENT =>
        this.write(kc, module_id, config, schema, dataFrame)
      case MetaDataConstants.META_DBSERVER =>
        val dbs: OracleSinkDbs = new OracleSinkDbs
        dbs.sink(kc, module_id, config, schema, dataFrame)
      case _ =>
        throw new Exception(s"oracleSink meta_origin 参数错误! ==> $meta_origin")
    }

    this
  }

  /**
   * Writes the DataFrame to Oracle using data-management metadata.
   *
   * Flow: init config/metadata -> normalize column types -> resolve the real
   * physical target table(s) (table splitting) -> per table, write according
   * to the configured mode:
   *  - append:    optional pre-delete by filter, then JDBC append
   *  - overwrite: truncate via admin account, then append
   *  - upsert:    load into a temp table, MERGE into the target, drop temp table
   * Finally reports row counts and sync metrics.
   *
   * Side effects: executes DDL/DML on Oracle, writes over JDBC, reports status
   * and metrics through kc.
   */
  def write(kc: KhaosContext,
            module_id: String,
            config: JObject,
            schema: Schema,
            dataFrame: DataFrame): this.type = {
    //log.info(s"oracle writer start ......")
    //log.info(s"oracle writer config json : ${config.toString}")

    init(kc, config)

    val connect = oracleMeta.dsOracleConnect
    val tblEntiy = oracleMeta.tableEntiy
    val colEntiy = oracleMeta.columnEntiy
    val host = connect.getHost
    val connectType = connect.getConnectType
    //    val userName = connect.getUsername
    // Username is double-quoted to preserve case sensitivity in Oracle.
    val userName = s"""\"${connect.getUsername}\""""
    val passWord = connect.getPassword
    val instanceName = connect.getInstanceName

    // var: overwrite mode mutates this to append after the manual truncate below.
    var writeType = oracleConfig.write_option.toLowerCase
    val extractFields = oracleConfig.extract_fields
    var convertDF: DataFrame = DataframeUtils.setDefaultValue(extractFields, colEntiy, dataFrame)
    convertDF = convertDataType(extractFields, convertDF)
    // Register the DF as a temp view (currently disabled).
    //convertDF.createOrReplaceTempView("OracleSinkTmp")

    // Row filtering and column pruning (reserved for future use).
    //val tableQuery = s"select * from OracleSinkTmp"
    //log.info(s"OracleSinkTmp sql is: $tableQuery")
    val resultDF = convertDF
    //log.info(s"OracleSinkDF partitions size is: ${resultDF.rdd.partitions.size}")
    //log.info(s"OracleSinkDF schema is: ${resultDF.printSchema}")
    //log.info(s"OracleSinkDF is: ${resultDF.show}")

    val url = OracleUtils.getOracleConnectTypeURL(host, instanceName, connectType)
    log.info(s"Oracle jdbcUrl is: $url")
    val driver = "oracle.jdbc.driver.OracleDriver"
    val prop = new Properties
    prop.put("driver", driver)
    prop.put("user", userName)
    prop.put("password", passWord)
    prop.put("url", url)
    // Workaround for the ojdbc6 driver failing when the OS user name exceeds 30 chars.
    val osuser = System.getProperty("user.name")
    if (osuser.length > 30)
      prop.put("oracle.jdbc.v$session.osuser", osuser.substring(0, 30))


    // Resolve the real physical table(s); a logical table may split into several.
    var tblNameAndDF: mutable.HashMap[String, DataFrame] = TableSplitUtils.getSinkRealTable(kc, dbName, tblName, this, oracleConfig.extender.meta.clazz,
      compact(render(oracleConfig.extender.meta.params)), resultDF, null)
    //    tblNameAndDF=tblNameAndDF.map(tp=>(tp._1.toUpperCase(),tp._2))
    // Target-table column names must exactly match the DF schema's field names
    // for the insert to succeed; order and count may differ.
    // Partition count defaults to the incoming DF's; repartitioning is possible.
    var oracleDataStatusInfo = new RelationDataStatusInfo
    tblNameAndDF.foreach(tp => {
      val realTblName: String = tp._1
      val realDataframe: DataFrame = tp._2
      // accumulator counts rows actually processed during the write.
      val (resultData, accumulator) = DataframeUtils.calculateDataNum(kc, realDataframe, "OracleSink")
      // When savemode is overwrite, truncate the Oracle table rather than
      // dropping and recreating it.
      if (!SINK_MODE_UPSERT.equals(writeType)) {
        if (SINK_MODE_INSERT.equals(writeType)) {
          // NOTE(review): `!= None` / `== None` — idiomatic Scala would use isDefined/isEmpty.
          if (oracleConfig.is_delete != None && oracleConfig.is_delete.get) {
            if (oracleConfig.filter == None)
              throw new RuntimeException("oracleSink 过滤条件不能为空！")
            // Build the delete SQL and execute it before appending.
            val delSql = s"delete from $dbName.$realTblName where ${oracleConfig.filter.get}"
            logInfo(s"---filter Sql:$delSql")
            OracleUtils.executeSql(url, prop, delSql)
          }
        } else if (SINK_MODE_OVERWRITE.equals(writeType)) {
          // For overwrite: manually empty the target table with the admin
          // account, then downgrade the savemode to insert (append).
          val truncateSql = s"truncate  table $dbName.$realTblName "
          val oracleAdminConnect = MetaUtils.getOracleDatasourceByName(kc, oracleConfig.extender.meta.clazz, compact(render(oracleConfig.extender.meta.params)))
          val adminProp = OracleUtils.getPropbyConnInfo(oracleAdminConnect)
          OracleUtils.executeSql(url, adminProp, truncateSql)
          writeType = SINK_MODE_INSERT
        }
        prop.put("dbtable", s"$dbName.${realTblName}")


        import com.kingsoft.dc.khaos.module.spark.enhance.DataFrameWriterEnhance._
        // Work around Spark's default 256-char VARCHAR limit for STRING columns.
        if (extractFields.map(v => v.data_type.toUpperCase).contains("STRING")){
          val tableColTypes = createTableColTypes(extractFields)
          resultData.write.format("jdbc").mode(writeType)
            .option("createTableColumnTypes", tableColTypes)
            .options(prop.asScala).save2Oracle()
        }else {
          resultData.write.format("jdbc").mode(writeType)
            .options(prop.asScala).save2Oracle()
        }

      } else {
        val pkArr = oracleConfig.logic_pk; // primary-key columns for the upsert
        // Temp table name: "KTMP_" + timestamp + random number (up to 8 digits).
        var tempTable = "KTMP_" + System.currentTimeMillis + "_" + new Random().nextInt(100000000)
        tempTable = s"""\"$tempTable\""""
        var isCreateTmpTab = false
        // Admin account is fetched for table creation only; use with caution.
        val oracleAdminConnect = MetaUtils.getOracleDatasourceByName(kc, oracleConfig.extender.meta.clazz, compact(render(oracleConfig.extender.meta.params)))
        try {
          Class.forName(driver);
          val adminProp = OracleUtils.getPropbyConnInfo(oracleAdminConnect)
          // Create an empty temp table with the target table's structure.
          val sqlCreatTab = s"create table $dbName.$tempTable as select * from $dbName.$realTblName A where 1=0"
          log.info("建表语句：" + sqlCreatTab)
          OracleUtils.executeSql(url, adminProp, sqlCreatTab)
          isCreateTmpTab = true
          // Append the source data into the temp table.
          //          resultData.write.mode(SaveMode.Append).jdbc(url, tempTable , adminProp)
          adminProp.put("url", url)
          adminProp.put("dbtable", tempTable)
          // Implicit class adds the save2Oracle method to DataFrameWriter.
          import com.kingsoft.dc.khaos.module.spark.enhance.DataFrameWriterEnhance._

          // Work around Spark's default 256-char VARCHAR limit for STRING columns.
          if (extractFields.map(v => v.data_type.toUpperCase).contains("STRING")){
            val tableColTypes = createTableColTypes(extractFields)
            resultData.write.format("jdbc").mode(SaveMode.Append)
              .option("createTableColumnTypes", tableColTypes)
              .options(adminProp.asScala).save2Oracle()
          }else {
            resultData.write.format("jdbc").mode(SaveMode.Append)
              .options(adminProp.asScala).save2Oracle()
          }

          // Execute the MERGE statement: update matched rows, insert new ones.
          val mergeSql =
            s"""
            merge into $dbName.$realTblName a using (select ${RmdbUtil.getCols(colEntiy)} from $dbName.${tempTable} ) b on( ${RmdbUtil.joinKeys("a", "b", pkArr)} )
            when matched then update set ${RmdbUtil.getUpsertCols("a", "b", colEntiy, pkArr)}
            when not matched then insert (${RmdbUtil.getSqlCols(colEntiy, "a")})
             values(${RmdbUtil.getSqlCols(colEntiy, "b")})
          """.stripMargin
          log.info(s"===================mergesql:$mergeSql++++++++++++")
          // Merge the data from the temp table into the target table.
          OracleUtils.executeSql(url, adminProp, mergeSql)
        } finally {
          // NOTE(review): the drop uses the user `prop` although the temp table
          // was created with the admin account — confirm the user has DROP
          // privilege on it, otherwise the temp table leaks.
          if (isCreateTmpTab)
            OracleUtils.executeSql(url, prop, s"drop table $dbName.$tempTable ")
        }
      }
      // Accumulate the written-row count across all physical tables.
      var numTemp: Long = 0
      if (oracleDataStatusInfo.getDataNum == null) {
        numTemp = accumulator.value.toLong
      } else {
        numTemp = oracleDataStatusInfo.getDataNum.toLong + accumulator.value.toLong
      }
      // Report data status.
      oracleDataStatusInfo.setDataNum(String.valueOf(numTemp))
    })

    // NOTE(review): writeType was reassigned to "append" in the overwrite branch
    // above, so this comparison reports cover=false even for overwrite runs —
    // confirm whether that is intended.
    oracleDataStatusInfo.setCover(if (writeType == "overwrite") true else false)
    DataframeUtils.reportDataStatusRelation(kc, oracleDataStatusInfo, oracleConfig.db_name, oracleConfig.table_name, oracleConfig.extender.meta.clazz,
      compact(render(oracleConfig.extender.meta.params)))
    // Report metrics to the operations center.
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(oracleDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)
    this
  }

  /**
   * Redefines column lengths for STRING-typed fields only, to work around
   * Spark's 255-char limit when overwriting Oracle tables. The length comes
   * from each ExtractFieldInfo; if absent, a default length of 2048 is used.
   *
   * @param extractFields field mapping/type info from the sink config
   * @return a "COL VARCHAR(len), ..." string for createTableColumnTypes
   */
  def createTableColTypes(extractFields: List[ExtractFieldInfo]): String = {
    extractFields
      .filter(v => "STRING".equals(v.data_type.toUpperCase))
      .map(v => s"${v.field.toUpperCase} VARCHAR(${v.length.getOrElse("2048")})")
      .mkString(", ")
  }


  /**
   * Initializes the sink: parses the JSON config into OracleConfig, quotes the
   * db/table names, verifies write permission, and loads table/column metadata.
   *
   * @throws Exception when the permission check fails or the target is a view
   */
  def init(kc: KhaosContext, config: JObject): Unit = {
    implicit val formats = DefaultFormats
    oracleConfig = config.extract[OracleConfig]
    // Double-quote identifiers so Oracle treats them case-sensitively.
    dbName = s"""\"${oracleConfig.db_name}\""""
    tblName = s"""\"${oracleConfig.table_name}\""""

    // Permission check.
    val checkResult = MetaUtils.checkWriteAuth(kc,
      oracleConfig.db_name,
      oracleConfig.table_name,
      oracleConfig.extender.auth.clazz,
      compact(render(oracleConfig.extender.auth.params)))
    if (!checkResult) {
      log.error(s"oracle writer init failed, 权限验证未通过!")
      throw new Exception(s"oracle writer init failed, 权限验证未通过!")
    }

    // Fetch metadata.
    oracleMeta = MetaUtils.getOracleMeta(kc,
      oracleConfig.db_name,
      oracleConfig.table_name,
      oracleConfig.extender.meta.clazz,
      compact(render(oracleConfig.extender.meta.params)),
      this)

    // Writing into views is not supported.
    if (oracleMeta.tableEntiy.getTblType.equalsIgnoreCase(MetaDataConstants.VIEW)) {
      throw new Exception(s"暂不支持写入视图类型表 ${dbName}.${tblName}")
    }
  }


  /**
   * Converts a DataFrame for the sink target:
   * renames fields per the mapping and fills default values.
   *
   * Fields without a source mapping become null columns; NOT NULL target
   * fields without a mapping and without a default value raise an error.
   *
   * @param data          source DataFrame
   * @param extractFields field mapping/type info from the sink config
   * @param columnEntiy   target table's column metadata
   * @return the converted DataFrame, cast to target types
   */
  def convertDataFrame(data: DataFrame, extractFields: List[ExtractFieldInfo], columnEntiy: util.List[DmTableColumn]): DataFrame = {
    //log.info(s"OracleSink before convert DF schema is: ${data.printSchema}")
    //log.info(s"OracleSink before convert DF is: ${data.show}")
    // Build a map of [column name, not_null ("true"/"false")] from column params.
    val fieldAndNotNull = columnEntiy.asScala.map(colEntiy => {
      val colName: String = colEntiy.getColName
      var not_null: String = ""
      colEntiy.getParams.asScala.foreach(map => {
        map.get("pKey") match {
          case "NOT_NULL" => not_null = map.get("pValue")
          case _ =>
        }
      })
      (colName, not_null)
    }).toMap
    // Copy only columns that have a source-to-target mapping ("connection").
    val colArr = new ArrayBuffer[Column]()
    for (ef <- extractFields) {
      if (!ef.from_field.trim.equals("")) {
        val to_field: String = ef.field
        val from_field: String = ef.from_field
        colArr += data.col(from_field) as (to_field)
      }
    }
    var value: DataFrame = data.select(colArr: _*)
    // Fill defaults and convert types; cast to String first so na.fill applies.
    for (ef <- extractFields) {
      val to_field: String = ef.field
      val data_type = ef.data_type
      val field_length = ef.length
      val from_field: String = ef.from_field
      val default_value: String = ef.field_props.default_value
      if (!from_field.trim.equals("")) {
        value = value.withColumn(to_field, value.col(to_field).cast(StringType))
      } else {
        value = value.withColumn(to_field, lit(null).cast(StringType))
      }
      // Fill the default value.
      if (!default_value.equals("")) {
        value = value.na.fill(default_value, Array(to_field))
      } else if (default_value.equals("") && fieldAndNotNull(to_field).equalsIgnoreCase("true") && from_field.trim.equals("")) {
        log.error(s"目标字段：${to_field}不能为null!")
        throw new Exception(s"目标字段：${to_field}不能为null!")
      }
      // Cast to the type required by the target table.
      if (field_length.isEmpty) {
        value = value.withColumn(to_field, value.col(to_field).cast(getDataType(data_type)))
      } else {
        value = value.withColumn(to_field, value.col(to_field).cast(getDataType(data_type, field_length.get)))
      }
    }
    //log.info(s"OracleSink after convert DF schema is: ${value.printSchema}")
    //log.info(s"OracleSink after convert DF is: ${value.show}")
    value
  }

  /** Casts DF column types to the target types; see getDataType() for mapping. */
  def convertDataType(sinkSchema: List[ExtractFieldInfo], data: DataFrame): DataFrame = {
    var value: DataFrame = data
    /*for (ef <- sinkSchema) {
      val to_field: String = ef.field
      val data_type = ef.data_type
      val field_length = ef.length
      //将类型转为目标表所需类型
      if (field_length.isEmpty) {
        value = value.withColumn(to_field, value.col(to_field).cast(getDataType(data_type)))
      } else {
        value = value.withColumn(to_field, value.col(to_field).cast(getDataType(data_type, field_length.get)))
      }
    }*/
    // Build one cast Column per sink field, then select them all at once
    // (single projection instead of repeated withColumn calls).
    val columns: List[Column] = sinkSchema.map((ef: ExtractFieldInfo) => {
      var column: Column = null
      val to_field: String = ef.field
      val data_type: String = ef.data_type
      val field_length: Option[String] = ef.length
      if (field_length.isEmpty) {
        column = value.col(to_field).cast(getDataType(data_type))
      } else {
        column = value.col(to_field).cast(getDataType(data_type, field_length.get))
      }
      column
    })

    value.select(columns: _*)
  }

  /** Maps the target field type to a Spark cast type, without a length parameter. */
  def getDataType(dataType: String): String = {
    var value: String = null
    // `string`, `boolean`, `byte`, `short`, `int`, `long`, * `float`, `double`, `decimal`, `date`, `timestamp`.
    value = dataType match {
      case ColumnType.DATE => "date"
      case ColumnType.TIME => "string"
      case ColumnType.DATETIME => "timestamp"
      case _ => "string"
    }
    value
  }

  /** Maps the target field type to a Spark cast type, with a length parameter. */
  def getDataType(dataType: String, fieldLength: String): String = {
    var value: String = null
    // `string`, `boolean`, `byte`, `short`, `int`, `long`, * `float`, `double`, `decimal`, `date`, `timestamp`.
    value = dataType match {
      case ColumnType.NUMBER => s"decimal($fieldLength)"
      case ColumnType.STRING => "string"
      case ColumnType.DECIMAL => s"decimal($fieldLength)"
      case ColumnType.DATE => "date"
      case ColumnType.TIME => "string"
      case ColumnType.DATETIME => "timestamp"
      case _ => "string"
    }
    value
  }
}