package com.kingsoft.dc.khaos.module.spark.sink

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.{ColumnType, MetaDataConstants, MysqlConstants, OracleConstants, SQLServerConstants, SchedulerConstants}
import com.kingsoft.dc.khaos.module.spark.metadata.sink.{ExtractFieldInfo, SqlServerConfig}
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.model.{MetaDataEntity, RelationDataStatusInfo}
import com.kingsoft.dc.khaos.module.spark.util._
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{Column, DataFrame, SaveMode}
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, render}

import java.sql.{Connection, PreparedStatement, Statement}
import java.sql.DriverManager.getConnection
import java.util
import java.util.{Properties, Random}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * SQL Server sink: writes a Spark DataFrame into a SQL Server table,
 * supporting append/overwrite plus a staging-table based upsert mode.
 *
 * @author fanshengli
 */
class SqlServerSink extends SinkStrategy with Serializable with Logging {
  // Spark connector format used for non-upsert bulk writes.
  private val _format = "com.microsoft.sqlserver.jdbc.spark"
  // Target table/column metadata; resolved in init().
  private var sqlServerMeta: MetaDataEntity = null
  // Parsed sink configuration, extracted from the module config JSON in init().
  private var sqlServerConfig: SqlServerConfig = null
  private var ip = ""
  private var port = ""
  private var instanceName = ""
  // Custom write mode: staged through a temp table + MERGE rather than Spark JDBC.
  private val SINK_MODE_UPSERT = "upsert";
  private var dbName = ""
  private var jdbcUrl = ""
  // Extra JDBC URL parameters from module.sqlserver.sink.* properties.
  private var _jdbc_url_param = ""
  private var _jdbc_driver = ""
  // "1" means the project user may create/drop staging tables for upsert.
  private var _has_permission_drop_table: String = ""
  // Rows per JDBC batch when inserting into the temp table.
  private var executeBatch:Int=10000

  // Initialized from config inside sink(): delete-before-write switch and its WHERE filter.
  private var ifDeleteOn = false
  private var filter = ""
  /**
   * Writes the incoming DataFrame into the configured SQL Server table.
   *
   * Supports the regular Spark JDBC save modes plus a custom "upsert" mode
   * which stages the data in a temporary table and MERGEs it into the target.
   * Afterwards reports row counts and data-status metrics to the center.
   *
   * @param kc        khaos runtime context
   * @param module_id id of this module instance (unused here)
   * @param config    raw JSON sink configuration
   * @param schema    inner schema of the incoming data (unused here)
   * @param dataFrame data to be written
   * @return this sink, for chaining
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JObject,
                    schema: Schema,
                    dataFrame: DataFrame): this.type = {
    init(kc, config)

    // Connection coordinates resolved from metadata during init().
    val connect = sqlServerMeta.dsSqlServerConnect
    instanceName = connect.getInstanceName
    ip = connect.getHost
    port = connect.getPort
    val userName = connect.getUserName
    val passWord = connect.getPassWord

    val tblEntiy = sqlServerMeta.tableEntiy // table details (metadata)
    val colEntiy = sqlServerMeta.columnEntiy
    // Double-quote identifiers so reserved words survive in T-SQL.
    dbName = "\"" + sqlServerConfig.db_name + "\""

    val tblName = "\"" + sqlServerConfig.table_name + "\""
    val writeType = sqlServerConfig.write_option.toLowerCase
    val extractFields = sqlServerConfig.extract_fields
    ifDeleteOn = sqlServerConfig.IfDeleteOn

    // Fill default values, then cast columns to the sink data types.
    var convertDF: DataFrame = DataframeUtils.setDefaultValue(extractFields, colEntiy, dataFrame)
    convertDF = convertDataType(extractFields, convertDF)
    val resultDF = convertDF
    jdbcUrl = s"jdbc:sqlserver://${ip}:${port};databaseName=$instanceName${_jdbc_url_param}"
    val driver = _jdbc_driver
    log.info("Sqlserver Sink driver=" + driver + " url=" + jdbcUrl)
    val prop = new Properties
    prop.put("user", userName)
    prop.put("password", passWord)

    // When the delete-before-write switch is on, purge matching history rows first.
    if (ifDeleteOn) {
      log.info("ifdeleteon is true")
      filter = sqlServerConfig.filter
      val deleteSql: String = deleteTableQuery(tblName)
      log.info(s"deletesql: ${deleteSql}")
      val desql: Boolean = executeSql(jdbcUrl, userName, passWord, deleteSql)
      if (desql) {
        log.info("删除历史数据成功")
      }
    }

    // Resolve the real physical table(s); a split strategy may fan one logical
    // table out to several physical tables, each with its own slice of the data.
    val tblNameAndDF: mutable.HashMap[String, DataFrame] = TableSplitUtils.getSinkRealTable(kc, dbName, tblName, this, sqlServerConfig.extender.meta.clazz,
      compact(render(sqlServerConfig.extender.meta.params)), resultDF, null)

    // Column names written must match the DF schema; order/count may differ.
    val relationDataStatusInfo = new RelationDataStatusInfo
    tblNameAndDF.foreach(tp => {
      val realTblName: String = dbName + "." + tp._1
      val realDataframe: DataFrame = tp._2
      val (resultData, accumulator) = DataframeUtils.calculateDataNum(kc, realDataframe, "SqlServerSink")

      if (!SINK_MODE_UPSERT.equals(writeType)) {
        // append/overwrite: straight through the Spark SQL Server connector.
        resultData.write.format(_format).mode(writeType).jdbc(jdbcUrl, realTblName, prop)
      } else {
        val pkArr = sqlServerConfig.logic_pk // upsert primary-key columns
        if ("1".equals(_has_permission_drop_table)) {
          // Upsert via a regular staging table "KTMP_" + millis + random:
          // created empty, bulk-loaded by Spark, MERGEd, then dropped.
          var conn: Connection = null
          var statement: Statement = null
          var isCreateTmpTab = false
          var tempTable = "KTMP_" + System.currentTimeMillis + "_" + new Random().nextInt(100000000)
          try {
            // 1. open the JDBC connection
            val driver = _jdbc_driver
            Class.forName(driver)
            conn = getConnection(jdbcUrl, userName, passWord)
            statement = conn.createStatement()

            tempTable = s"""\"$tempTable\""""
            // Clone the target table's structure without copying any rows.
            val sqlCreatTab = s"select * into $dbName.$tempTable from $realTblName where 1=0"
            log.info("建表语句：" + sqlCreatTab)
            statement.execute(sqlCreatTab)
            isCreateTmpTab = true

            // 2. bulk-insert the source data into the staging table
            resultData.write.mode(SaveMode.Append).jdbc(jdbcUrl, s"$dbName.$tempTable", prop)

            // 3. MERGE the staging table into the target table
            val mergeSql =
              s"""
            merge into $realTblName a using (select ${RmdbUtil.getCols(colEntiy)} from $dbName.${tempTable} ) b on( ${RmdbUtil.joinKeys("a", "b", pkArr)} )
            when matched then update set ${RmdbUtil.getUpsertCols("a", "b", colEntiy, pkArr)}
            when not matched then insert (${RmdbUtil.getCols(colEntiy)}) values(${RmdbUtil.getSqlCols(colEntiy, "b")});
          """.stripMargin
            log.info(s"===================mergesql:$mergeSql++++++++++++")
            statement.execute(mergeSql)

            if (isCreateTmpTab) {
              val dropSql = s"drop table $dbName.$tempTable"
              log.info(s"===================删除临时表:$dropSql++++++++++++")
              statement.execute(dropSql)
            }
          } catch {
            case e: Exception => {
              // Pass the Throwable itself so the stack trace reaches the log.
              log.error("sqlserver运行upsert过程中报错：", e)
              e.printStackTrace()
              throw new Exception(s"sqlserver upsert writer failed," + e.getMessage)
            }
          } finally {
            // BUGFIX: these null checks were inverted (null == x), so the
            // statement/connection were never closed and leaked on every run.
            if (null != statement) {
              statement.close()
            }
            if (null != conn) {
              conn.close()
            }
          }
        } else {
          // No DDL permission: stage through a global temp table ("##KTMP_...")
          // created, loaded and MERGEd per partition on a single connection.
          // NOTE(review): this branch writes resultDF (the whole converted DF)
          // rather than resultData for this physical table — verify this is
          // intended when table splitting produces more than one table.
          resultDF.rdd.foreachPartition(iter => {
            log.info("线程:{}-------进入foreachPartition",Thread.currentThread().getName)
            var onePartitionJdbcConn: Connection = null
            var oneStmt: Statement = null
            var insertPs: PreparedStatement = null
            var tempTable = "##KTMP_" + System.currentTimeMillis + "_" + new Random().nextInt(100000000)
            try {
              // 1. open a connection with manual transaction control
              val driver = _jdbc_driver
              Class.forName(driver)
              onePartitionJdbcConn = getConnection(jdbcUrl, userName, passWord)
              onePartitionJdbcConn.setAutoCommit(false)
              oneStmt = onePartitionJdbcConn.createStatement()

              tempTable = s"""\"$tempTable\""""
              val sqlCreatTab = s"select * into $dbName.$tempTable from $realTblName where 1=0"
              log.info("建表语句：" + sqlCreatTab)
              oneStmt.execute(sqlCreatTab)

              // 2. batch-insert the partition rows into the temp table.
              // One '?' placeholder per target column.
              val selectStr: String = List.fill(colEntiy.size())("?").mkString(",")
              val insertTmpSql = s"insert into $dbName.$tempTable values (" + selectStr + ")"
              log.info("创建insert into语句===》" + insertTmpSql)
              insertPs = onePartitionJdbcConn.prepareStatement(insertTmpSql)
              var count = 0
              while (iter.hasNext) {
                val row = iter.next
                val fields = row.schema.fields
                for (i <- 0 until fields.length) {
                  val fieldValue = row.get(i)
                  insertPs.setObject(i + 1, fieldValue)
                }
                insertPs.addBatch
                count += 1
                // Flush every executeBatch rows to bound client-side memory.
                if (0 == count % executeBatch) {
                  log.info("即将提交count量={}", count)
                  insertPs.executeBatch
                }
              } // end while
              log.info("--fsl调试 01")
              insertPs.executeBatch
              log.info("--fsl调试 02")

              // 3. MERGE the temp table into the target table (the real upsert)
              val mergeSql =
                s"""
            merge into $realTblName a using (select ${RmdbUtil.getCols(colEntiy)} from $dbName.${tempTable} ) b on( ${RmdbUtil.joinKeys("a", "b", pkArr)} )
            when matched then update set ${RmdbUtil.getUpsertCols("a", "b", colEntiy, pkArr)}
            when not matched then insert (${RmdbUtil.getCols(colEntiy)}) values(${RmdbUtil.getSqlCols(colEntiy, "b")});
          """.stripMargin
              log.info(s"===================mergesql:$mergeSql++++++++++++")
              oneStmt.execute(mergeSql)
              log.info("--fsl调试 03")
              onePartitionJdbcConn.commit()
              log.info("--fsl调试 04")
            } catch {
              case e: Exception => {
                // Pass the Throwable itself so the stack trace reaches the log.
                log.error("运行upsert过程中报错：", e)
                e.printStackTrace()
                // BUGFIX: guard the rollback — the connection may never have
                // been opened if getConnection itself failed.
                if (null != onePartitionJdbcConn) {
                  onePartitionJdbcConn.rollback()
                }
                throw new Exception(s"sqlserver upsert writer failed," + e.getMessage)
              }
            } finally {
              if (oneStmt != null) {
                oneStmt.close()
              }
              if (insertPs != null) {
                insertPs.close()
              }
              if (null != onePartitionJdbcConn) {
                onePartitionJdbcConn.close()
                log.info(s"关闭$onePartitionJdbcConn 连接成功")
              }
            }
          }) //end rdd foreach
        }

      } //end upsert

      // Accumulate the written row count across physical tables.
      var numTemp: Long = 0
      if (relationDataStatusInfo.getDataNum == null) {
        numTemp = accumulator.value.toLong
      } else {
        numTemp = relationDataStatusInfo.getDataNum.toLong + accumulator.value.toLong
      }
      relationDataStatusInfo.setDataNum(String.valueOf(numTemp))
    })
    // Report the data-status relation (cover = full overwrite).
    relationDataStatusInfo.setCover(if (writeType == "overwrite") true else false)
    DataframeUtils.reportDataStatusRelation(kc, relationDataStatusInfo, sqlServerConfig.db_name, sqlServerConfig.table_name, sqlServerConfig.extender.meta.clazz,
      compact(render(sqlServerConfig.extender.meta.params)))
    // Report processed-row metrics to the operations center.
    val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
    metric.setProcessDataLValue(relationDataStatusInfo.getDataNum.toLong)
    CenterMetricUtils.reportSyncProcessData(metric, kc)
    this
  }

  /**
   * Executes a semicolon-separated batch of SQL statements over a fresh JDBC
   * connection.
   *
   * @param url      JDBC connection URL
   * @param user     database user
   * @param password database password
   * @param sql      one or more statements separated by ';'
   * @return true when the batch executed without error
   * @throws Exception re-thrown after printing the stack trace on any failure
   */
  def executeSql(url: String, user: String, password: String, sql: String): Boolean = {
    var conn: Connection = null
    var st: Statement = null
    try {
      Class.forName(_jdbc_driver)
      conn = getConnection(url, user, password)
      st = conn.createStatement
      for (elem <- sql.split(";", -1).toList) {
        st.addBatch(elem)
      }
      st.executeBatch()
      true
    } catch {
      case e: Exception =>
        e.printStackTrace()
        // BUGFIX: the old `false` after `throw` was unreachable dead code.
        throw e
    } finally {
      // BUGFIX: the statement/connection were only closed on the success path,
      // leaking both whenever the batch failed.
      if (st != null) st.close()
      if (conn != null) conn.close()
    }
  }

  /**
   * Builds the DELETE statement used to purge historical rows before writing.
   * The WHERE filter must be non-empty when the delete switch is on.
   */
  def deleteTableQuery(tblName: String): String = {
    val hasCondition = filter != null && filter.trim.nonEmpty
    if (!hasCondition) {
      throw new Exception("删除历史数据开关打开后，where条件不能为空！")
    }
    s"delete from $dbName.$tblName where $filter"
  }

  /**
   * Initializes the sink: parses the module config, loads sink properties,
   * verifies write permission, and fetches the target table metadata.
   *
   * @param kc     khaos runtime context
   * @param config raw JSON module configuration
   * @throws Exception when the permission check fails or the target is a view
   */
  def init(kc: KhaosContext, config: JObject): Unit = {
    // (removed: an unreachable `if (true == false)` block that only declared
    // an unused local string)
    implicit val formats = DefaultFormats
    sqlServerConfig = config.extract[SqlServerConfig]

    // Load module.sqlserver.sink.* properties (driver, url params, batch size).
    loadProperties(kc)

    // Verify the project is allowed to write to this table.
    val checkResult = MetaUtils.checkWriteAuth(kc,
      sqlServerConfig.db_name,
      sqlServerConfig.table_name,
      sqlServerConfig.extender.auth.clazz,
      compact(render(sqlServerConfig.extender.auth.params)))
    if (!checkResult) {
      log.error(s"SqlServer writer init failed, 权限验证未通过!")
      throw new Exception(s"SqlServer writer init failed, 权限验证未通过!")
    }

    // Fetch table/column metadata for the sink target.
    sqlServerMeta = MetaUtils.getSQLServerMeta(kc,
      sqlServerConfig.db_name,
      sqlServerConfig.table_name,
      sqlServerConfig.extender.meta.clazz,
      compact(render(sqlServerConfig.extender.meta.params)),
      this)

    // Views cannot be written to.
    if (sqlServerMeta.tableEntiy.getTblType.equalsIgnoreCase(MetaDataConstants.VIEW)) {
      throw new Exception(s"暂不支持写入视图类型表 ${sqlServerConfig.db_name}.${sqlServerConfig.table_name}")
    }
  }

  /**
   * Redefines the column length only for STRING-typed fields, to work around
   * Spark's 255-char default column size when overwriting; the length comes
   * from each ExtractFieldInfo, falling back to 2048 when absent.
   */
  def createTableColTypes(extractFields: List[ExtractFieldInfo]): String = {
    val stringColumns = for {
      info <- extractFields
      if "STRING".equals(info.data_type.toUpperCase)
    } yield s"${info.field.toUpperCase} VARCHAR(${info.length.getOrElse("2048")})"
    stringColumns.mkString(", ")
  }

  /**
   * Loads the module.sqlserver.sink.* configuration: JDBC url parameters,
   * driver class, drop-table permission flag, and insert batch size.
   *
   * @param kc khaos runtime context holding the configuration
   */
  def loadProperties(kc: KhaosContext): Unit = {
    val props: Map[String, String] = kc.conf.getAllWithPrefix("module.sqlserver.sink.").toMap
    log.info("SqlServer Properties")
    for ((key, value) <- props) {
      log.info(key + "   " + value)
    }
    _jdbc_url_param = props.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SINK_JDBC_URL_PARAM, "")
    _jdbc_driver = props.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SINK_JDBC_DRIVER, "com.microsoft.sqlserver.jdbc.SQLServerDriver")
    // Whether the project user may create/drop tables (for upsert staging).
    _has_permission_drop_table = props.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SINK_HASPERMISSION_DROPTABLE, "false")
    executeBatch = Integer.valueOf(props.getOrElse(SQLServerConstants.MODULE_SQLSERVER_SINK_EXECUTEBATCH, "10000"))
  }

  /**
   * Converts the source DataFrame for sinking: keeps only mapped columns,
   * renames them to their target field names, fills default values, and casts
   * each column to the target data type.
   *
   * @param data          source DataFrame
   * @param extractFields field mappings (from_field -> field, type, length, default)
   * @param columnEntiy   target table column metadata (used for NOT_NULL checks)
   * @return the converted DataFrame
   * @throws Exception when a NOT NULL target field has neither a source column nor a default value
   */
  def convertDataFrame(data: DataFrame, extractFields: List[ExtractFieldInfo], columnEntiy: util.List[DmTableColumn]): DataFrame = {
    //log.info(s"MySQLSink before convert DF schema is: ${data.printSchema}")
    //log.info(s"MySQLSink before convert DF is: ${data.show}")
    // Build a map of [column name -> NOT_NULL flag] from each column's params.
    // NOTE(review): assumes every params entry has a "pKey"; a missing key
    // yields null and falls to the wildcard case — confirm with the meta model.
    val fieldAndNotNull = columnEntiy.asScala.map(colEntiy => {
      val colName: String = colEntiy.getColName
      var not_null: String = ""
      colEntiy.getParams.asScala.foreach(map => {
        map.get("pKey") match {
          case "NOT_NULL" => not_null = map.get("pValue")
          case _ =>
        }
      })
      (colName, not_null)
    }).toMap
    // Only copy columns that have a source mapping (non-empty from_field).
    val colArr = new ArrayBuffer[Column]()
    for (ef <- extractFields) {
      if (!ef.from_field.trim.equals("")) {
        val to_field: String = ef.field
        val from_field: String = ef.from_field
        colArr += data.col(from_field) as (to_field)
      }
    }
    var value: DataFrame = data.select(colArr: _*)
    // Fill defaults and cast types; columns are cast to String first so that
    // na.fill can apply the (string) default value uniformly.
    for (ef <- extractFields) {
      val to_field: String = ef.field
      val data_type = ef.data_type
      val field_length = ef.length
      val from_field: String = ef.from_field
      val default_value: String = ef.field_props.default_value
      if (!from_field.trim.equals("")) {
        // Trim TIME values: values with whitespace fail to write to MySQL.
        if (data_type.equalsIgnoreCase("TIME")) {
          value = value.withColumn(to_field, trim(value.col(to_field).cast(StringType)))
        } else {
          value = value.withColumn(to_field, value.col(to_field).cast(StringType))
        }
      } else {
        // Unmapped target fields start as all-null string columns.
        value = value.withColumn(to_field, lit(null).cast(StringType))
      }
      // Fill the default value; reject unmapped NOT NULL fields without one.
      if (!default_value.equals("")) {
        value = value.na.fill(default_value, Array(to_field))
      } else if (default_value.equals("") && fieldAndNotNull(to_field).equalsIgnoreCase("true") && from_field.trim.equals("")) {
        log.error(s"目标字段：${to_field}不能为null!")
        throw new Exception(s"目标字段：${to_field}不能为null!")
      }
      // Cast to the type required by the target table (see getDataType).
      if (field_length.isEmpty) {
        value = value.withColumn(to_field, value.col(to_field).cast(getDataType(data_type)))
      } else {
        value = value.withColumn(to_field, value.col(to_field).cast(getDataType(data_type, field_length.get)))
      }
    }
    //log.info(s"MySQLSink after convert DF schema is: ${value.printSchema}")
    //log.info(s"MySQLSink after convert DF is: ${value.show}")
    value
  }

  /**
   * Casts every sink column of the DataFrame to its target Spark SQL type
   * (see getDataType), honoring the optional field length.
   *
   * @param sinkSchema sink field definitions (name, type, optional length)
   * @param data       DataFrame whose columns are cast
   * @return a DataFrame selecting all sink columns with their target types
   */
  def convertDataType(sinkSchema: List[ExtractFieldInfo], data: DataFrame): DataFrame = {
    // Build one cast expression per sink field; length-aware when present.
    val columns: List[Column] = sinkSchema.map { ef =>
      val targetType = ef.length match {
        case Some(len) => getDataType(ef.data_type, len)
        case None => getDataType(ef.data_type)
      }
      data.col(ef.field).cast(targetType)
    }
    data.select(columns: _*)
  }

  /**
   * Maps a Khaos column type to a Spark SQL cast type when no field length is
   * supplied; anything unrecognized falls back to "string".
   */
  def getDataType(dataType: String): String =
    dataType match {
      case ColumnType.DATE => "date"
      case ColumnType.TIME => "string"
      case ColumnType.DATETIME => "timestamp"
      case _ => "string"
    }

  /**
   * Maps a Khaos column type to a Spark SQL cast type using the supplied field
   * length (only DECIMAL actually embeds the length); unrecognized types fall
   * back to "string".
   */
  def getDataType(dataType: String, fieldLength: String): String =
    dataType match {
      case ColumnType.NUMBER => "long"
      case ColumnType.STRING => "string"
      case ColumnType.DECIMAL => s"decimal($fieldLength)"
      case ColumnType.DATE => "date"
      case ColumnType.TIME => "string"
      case ColumnType.DATETIME => "timestamp"
      case _ => "string"
    }
}