package com.kingsoft.dc.khaos.module.spark.sink

import java.sql.Date
import java.util
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.col.DmTableColumn
import com.kingsoft.dc.khaos.extender.meta.model.ds.ESConnect
import com.kingsoft.dc.khaos.extender.meta.model.table.DmTable
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.constants.ColumnType
import com.kingsoft.dc.khaos.module.spark.metadata.sink._
import com.kingsoft.dc.khaos.module.spark.model.RelationDataStatusInfo
import com.kingsoft.dc.khaos.module.spark.model.center.metric.SyncProcessDataMetric
import com.kingsoft.dc.khaos.module.spark.util.{CenterMetricUtils, DataframeUtils, ESUserProvider, FileUtils, MetaUtils, UserDefinedDataFrameFieldExtractor}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.spark.SparkFiles
import org.apache.spark.sql.functions.{col, from_json, lit, schema_of_json, to_json, translate, trim}
import org.apache.spark.sql.types.{DataType, DateType, StringType, StructType}
import org.apache.spark.sql.{Column, DataFrame, SaveMode}
import org.elasticsearch.hadoop.cfg.ConfigurationOptions
import org.elasticsearch.spark.sql.DefaultSource15
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods.{compact, render}
import org.json4s.{DefaultFormats, JsonAST}

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}


/**
 * create by yansu on 2019/07/02 11:41
 */
class ESSink extends SinkStrategy with Logging with Serializable {
  // Runtime context / parsed sink configuration, populated in sink().
  private var _kc: KhaosContext = null
  private var _schema: Schema = null
  private var _esSinkConfig: ESSinkConfig = null
  // Maps the configured write option to a Spark SaveMode.
  private val writeModeMap = Map("append" -> SaveMode.Append,
    "overwrite" -> SaveMode.Overwrite)
  // Lower-cased, trimmed write option from the sink config (e.g. "append", "upsert").
  private var writeMode: String = null
  // Column / table metadata fetched from the data-management service in initEsConf().
  private var _EsColsInfo: util.List[DmTableColumn] = null
  private var _EsTblInfo: DmTable = null
  private var esConnect: ESConnect = null
  // Whether the target is the internal (platform default) datasource.
  private var defaultDs = true
  private var moduleId = ""
  // doc id
  private var _id: String = ""
  // Row-count status holder reported after each write.
  private var _ESDataStatusInfo: RelationDataStatusInfo = null

  //kerberos
  private var principal = ""
  private var keytab = ""
  private var keytabPath = ""
  private var krb5Conf = ""
  private var krb5ConfPath = ""
  private var spnegoPrincipal = ""

  // Dynamic index date-format patterns supported by setDynamicIndex().
  object FormatEnum {
    val YYYY = "YYYY"
    val YYYYMM = "YYYYMM"
    val YYYYMMDD = "YYYYMMDD"
  }

  /**
   * Data-output entry point: parses the sink config, runs the write-permission
   * check, resolves ES metadata and connection settings, converts the incoming
   * DataFrame to the ES mapping, and writes it — either into a single index or,
   * when the table's USE_TEMPLATE param is "true", split across dynamic
   * date-partitioned indices. Row counts and metrics are reported after each write.
   *
   * @param kc        Khaos runtime context
   * @param module_id id of this sink module instance
   * @param config    raw sink configuration, extracted into ESSinkConfig
   * @param schema    upstream schema
   * @param dataSet   data to write
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JsonAST.JObject,
                    schema: Schema,
                    dataSet: DataFrame): Any = {
    // Parse config into the typed sink configuration.
    implicit val formats = DefaultFormats
    val esSinkConfig = config.extract[ESSinkConfig]
    _esSinkConfig = esSinkConfig
    this.moduleId = module_id
    writeMode = _esSinkConfig.write_option.get.trim.toLowerCase
    _schema = schema
    _kc = kc

    // Write-permission check (only when an auth extender class is configured).
    if (!_esSinkConfig.extender.get.auth.clazz.isEmpty) {
      MetaUtils.checkWriteAuth(_kc,
        _esSinkConfig.db_name,
        _esSinkConfig.table_name,
        _esSinkConfig.extender.get.auth.clazz,
        compact(render(_esSinkConfig.extender.get.auth.params)))
    }

    // Work out the unique ES document identifier (_id).
    if (_id.equals("")) {
      // Generate the doc_id from the user-configured rule.
      val user_id = getDocIDByFileds(esSinkConfig.doc_id)
      if (!user_id.isEmpty) {
        _id = user_id
      } else {
        _id = ""
      }
      // Parse _id and check whether the concatenated fields exist in the df.
      //      val judgmentExistDocId = hasNotValidField(_id, dataSet.columns)
      //      if (!judgmentExistDocId) {
      //        log.info("doc_id is right!")
      //      } else {
      //        log.error("doc_id is not right!")
      //        throw new Exception("doc_id is not right!")
      //      }
    }
    // Load the ES configuration (also populates _EsTblInfo/_EsColsInfo/esConnect).
    var esConfig: mutable.HashMap[String, String] = initEsConf()
    // Convert types (apply defaults, rename columns, build nested fields).
    val changeNameDF: DataFrame = handDataFrameType(dataSet, _esSinkConfig.extract_fields)

    val sqlContext = kc.sparkSession.sqlContext
    val ds: DefaultSource15 = new DefaultSource15
    log.info("开始写入ES...")

    // Dynamic-index switches, read from the table params below.
    var flag = "false"
    var colName = ""
    var format = ""

    _EsTblInfo.getParams.asScala.foreach(map => {
      map.get("pKey") match {
        case "USE_TEMPLATE" => flag = map.get("pValue").toString
        case _ =>
      }
      map.get("pKey") match {
        case "TEMPLATE_PATTERN_COL" => {
          colName = map.get("pValue").toString
          // Strip the "tpart_" prefix from the template partition column name.
          if (colName.contains("tpart_")) {
            colName = colName.substring(6)
          }
        }
        case _ =>
      }
      map.get("pKey") match {
        case "TEMPLATE_PATTERN_FORMAT" => format = map.get("pValue").toString
        case _ =>
      }
    })

    // Time-partition column and format obtained from data management.
    val newFiledAndFormatMap = Map[String, String]((colName, format))
    if (flag.trim.toLowerCase.equals("true")) {
      // Dynamic index names mapped to their per-index result sets.
      val resMap = setDynamicIndex(newFiledAndFormatMap, changeNameDF)

      _ESDataStatusInfo = new RelationDataStatusInfo
      // 20200618 fix: still report status when dynamic indexing yields no data.
      if (resMap.size == 0) {
        _ESDataStatusInfo.setDataNum("0")
        // Report data status.
        DataframeUtils.reportDataStatusRelation(_kc, _ESDataStatusInfo, _esSinkConfig.db_name, _esSinkConfig.table_name, _esSinkConfig.extender.get.meta.clazz,
          compact(render(_esSinkConfig.extender.get.meta.params)))
      }
      for (indexAndDF <- resMap) {
        esConfig += ("es.resource" -> s"${indexAndDF._1}/${_esSinkConfig.table_name}")
        log.info("es.resource ==>" + indexAndDF._1 + "/" + _esSinkConfig.table_name)
        val (resultData, accumulator) = DataframeUtils.calculateDataNum(_kc, indexAndDF._2, "ESSink")

        if (esConnect.getConnectType.equals("krbsType")) {
          log.info("ESSink-executor端开启kerberos认证中......")

          initKerberosConf(esConfig)
        }

        // NOTE(review): SaveMode is looked up with the literal "append", so the
        // "overwrite" entry of writeModeMap is never used here; the actual ES
        // write behaviour appears to be driven by "es.write.operation" — confirm intended.
        ds.createRelation(sqlContext,
          writeModeMap.getOrElse("append", SaveMode.Append),
          esConfig.toMap,
          resultData)
        _ESDataStatusInfo.setDataNum(accumulator.value.toString)
        // Remove es.resource once this index has been written.
        esConfig.remove("es.resource")
        // Report data status.
        DataframeUtils.reportDataStatusRelation(_kc, _ESDataStatusInfo, _esSinkConfig.db_name, _esSinkConfig.table_name, _esSinkConfig.extender.get.meta.clazz,
          compact(render(_esSinkConfig.extender.get.meta.params)))
        // Report metrics to the operations center.
        val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
        metric.setProcessDataLValue(_ESDataStatusInfo.getDataNum.toLong)
        CenterMetricUtils.reportSyncProcessData(metric, kc)
      }
    } else {
      _ESDataStatusInfo = new RelationDataStatusInfo
      val (resultData, accumulator) = DataframeUtils.calculateDataNum(_kc, changeNameDF, "ESSink")
      esConfig += ("es.resource" -> s"${_esSinkConfig.db_name}/${_esSinkConfig.table_name}")
      log.info("Config ==> " + esConfig.mkString(" "))

      if (esConnect.getConnectType.equals("krbsType")) {
        log.info("ESSink-executor端开启kerberos认证中......")

        initKerberosConf(esConfig)
      }

      ds.createRelation(sqlContext,
        writeModeMap.getOrElse("append", SaveMode.Append),
        esConfig.toMap,
        resultData)
      _ESDataStatusInfo.setDataNum(accumulator.value.toString)
      // Report data status.
      DataframeUtils.reportDataStatusRelation(_kc, _ESDataStatusInfo, _esSinkConfig.db_name, _esSinkConfig.table_name, _esSinkConfig.extender.get.meta.clazz,
        compact(render(_esSinkConfig.extender.get.meta.params)))
      // Report metrics to the operations center.
      val metric: SyncProcessDataMetric = CenterMetricUtils.buildSyncProcessDataMetric(kc)
      metric.setProcessDataLValue(_ESDataStatusInfo.getDataNum.toLong)
      CenterMetricUtils.reportSyncProcessData(metric, kc)
    }
  }

  /**
   * Builds the elasticsearch-hadoop configuration map: write operation,
   * connection security (http / https+SSL / kerberos), optional credentials,
   * node addresses, and the es.mapping.id rule derived from the doc_id config.
   * Also caches table/column metadata in fields as a side effect.
   *
   * @return mutable config map later passed to DefaultSource15.createRelation
   */
  def initEsConf(): mutable.HashMap[String, String] = {
    log.info("加载es配置...")
    var esConfig = new mutable.HashMap[String, String]
    esConfig += ("es.write.operation" -> writeMode)
    implicit val formats = DefaultFormats
    val index_name = _esSinkConfig.db_name
    val type_name = _esSinkConfig.table_name
    val class_name = _esSinkConfig.extender.get.meta.clazz

    val esMeta =
      MetaUtils.getESMeta(_kc, index_name, type_name, class_name, compact(render(_esSinkConfig.extender.get.meta.params)))

    esConnect = esMeta.getDsEsConnect
    // Column attributes.
    _EsColsInfo = esMeta.getColumnEntiy
    // Table attributes.
    _EsTblInfo = esMeta.getTableEntiy
    // Whether this is the internal (platform default) datasource.
    defaultDs = esMeta.defaultDs
    val connectType: String = esConnect.getConnectType
    connectType match {
      case "http" =>
      // Plain http needs no extra authentication settings.
      case "https" =>

        // Decode the base64-encoded SSL files and rewrite them into the Spark app temp dir.
        val (keyStorePath, trustStorePath) = getSSLFile(esConnect)

        esConfig += (
          "es.net.ssl" -> "true",

          "es.net.ssl.keystore.type" -> (if (esConnect.getKeyStoreType.isEmpty) "JKS" else esConnect.getKeyStoreType),
          "es.net.ssl.keystore.pass" -> esConnect.getKeyStorePass,
          "es.net.ssl.keystore.location" -> s"file://$keyStorePath",

          "es.net.ssl.truststore.pass" -> esConnect.getTrustStorePass,
          "es.net.ssl.truststore.location" -> s"file://$trustStorePath"
        )

      case "krbsType" =>
        if (defaultDs) {
          log.info("ESSink driver端开启kerberos认证中......")
          principal = _kc.conf.getString("proxy.user")
          keytab = principal + ".keytab"
          krb5ConfPath = _kc.conf.getString("proxy.krb5.conf")
          keytabPath = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + keytab
        } else {
          // External datasource: decode the uploaded keytab/krb5 files into the staging dir.
          log.info("ESSink driver端开启外部kerberos认证中......")
          principal = esConnect.getPrincipal
          keytab = "/es_sink_" + moduleId + ".keytab"
          krb5Conf = "/es_sink_krb5_" + moduleId + ".conf"
          keytabPath = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + keytab
          krb5ConfPath = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + krb5Conf
          FileUtils.decoderBase64File(esConnect.getKeytabFile, keytabPath, FileSystem.newInstance(new Configuration()))
          FileUtils.decoderBase64File(esConnect.getKrb5File, krb5ConfPath, FileSystem.newInstance(new Configuration()))
          _kc.sparkSession.sparkContext.addFile(krb5ConfPath)
        }

        log.info("principal=>" + principal)
        log.info("keytabPath=>" + keytabPath)
        log.info("krb5ConfPath=>" + krb5ConfPath)
        _kc.sparkSession.sparkContext.addFile(keytabPath)
        initKerberosConf(esConfig)

      case _ =>
        log.error("未知连接方式!")
        throw new Exception("未知连接方式")
    }

    // Username/password are optional.
    if (esConnect.getUsername != null && esConnect.getPassword != null) {
      esConfig += (
        "es.net.http.auth.user" -> esConnect.getUsername,
        "es.net.http.auth.pass" -> esConnect.getPassword
      )
    }

    // Physical ES addresses from data management (one or several, comma-separated).
    val httpUrls = esConnect.getHttpUrls

    if (httpUrls.contains(",")) {

      // Configure conf.
      //          esConfig += ("es.nodes" -> nodesStringBuffer.toString.trim)
      esConfig += ("es.nodes.wan.only" -> "true")
      esConfig += ("es.nodes" -> httpUrls)
    } else {
      // Single URL of form scheme://host:port — split out host and port.
      log.info("es.nodes ==> " + httpUrls.split(":")(1).substring(2))
      log.info("es.port ==> " + httpUrls.split(":")(2).replace("/", ""))
      // Configure conf.
      esConfig += ("es.nodes.wan.only" -> "true")
      esConfig += ("es.nodes" -> httpUrls.split(":")(1).substring(2))
      esConfig += ("es.port" -> httpUrls.split(":")(2).replace("/", ""))
    }

    // Set es.mapping.id according to gen_rule_name from the doc_id config.
    val gen_rule_name = compact(_esSinkConfig.doc_id.obj.head._2)
    gen_rule_name.substring(1, gen_rule_name.length - 1) match {

      case "pattern" =>
        esConfig += ("es.mapping.default.extractor.class" -> classOf[UserDefinedDataFrameFieldExtractor].getName)
        // Parse doc_id and replace DataFrame field names with the ES field names.
        val resolvedIds = resolveVars(_id)
        var idAndMode = new ArrayBuffer[String]()
        for (esField <- _esSinkConfig.extract_fields) {
          for (resolvedId <- resolvedIds) {
            if (resolvedId.equalsIgnoreCase(esField.field)) {
              idAndMode += "{" + esField.field + "}"
            }
          }
        }
        // Re-join the fields with the configured stitching separator.
        _id = idAndMode.mkString(_esSinkConfig.doc_id.obj.last._2.extract[PatternRule].stitching_mode)
        log.info(s"使用 ${_id} 作为doc_id")
        esConfig += ("es.mapping.id" -> _id)

      case "field" =>
        for (elem <- _esSinkConfig.extract_fields) {
          if (elem.field.equalsIgnoreCase(_id)) {
            _id = elem.field.trim
          }
        }
        log.info(s"使用 ${_id} 作为doc_id")
        esConfig += ("es.mapping.id" -> _id)
      case "uuid" =>
        log.info("使用自动生成doc_id")
      case _ =>
        log.info("未知doc_id类型,使用自动生成UUID ")
    }

    esConfig
  }


  /**
   * Adds Kerberos/SPNEGO authentication settings to the given ES config map
   * and flips the JVM-wide krb5/debug system properties.
   *
   * @param esConfig mutable ES configuration map to enrich (mutated in place)
   */
  def initKerberosConf(esConfig: mutable.HashMap[String, String]): Unit = {
    // The SPNEGO principal reuses the host/realm part of the ES service
    // principal; assumes a "user/host@REALM" form — TODO confirm the principal
    // always contains a '/'.
    val hostAndRealm = esConnect.getPrincipal.split("/")(1)
    spnegoPrincipal = "HTTP/" + hostAndRealm
    log.info("spnegoPrincipal=>" + spnegoPrincipal)

    // krb5.conf location: staging path for the internal datasource, otherwise
    // the file previously distributed via SparkContext.addFile.
    val krb5Location = if (defaultDs) krb5ConfPath else SparkFiles.get(krb5Conf)

    // JVM-global switches (debug tracing on, kerberos indication, krb5 config path).
    System.setProperty("sun.security.krb5.debug", "true")
    System.setProperty("sun.security.spnego.debug", "true")
    System.setProperty("es.security.indication", "true")
    System.setProperty("java.security.krb5.conf", krb5Location)

    esConfig ++= Seq(
      "es.http.timeout" -> "5m",
      "es.http.retries" -> "50",
      ConfigurationOptions.ES_SECURITY_AUTHENTICATION -> "kerberos",
      ConfigurationOptions.ES_NET_SPNEGO_AUTH_ELASTICSEARCH_PRINCIPAL -> spnegoPrincipal,
      ConfigurationOptions.ES_SECURITY_USER_PROVIDER_CLASS -> "com.kingsoft.dc.khaos.module.spark.util.ESUserProvider",
      ESUserProvider.SPARK_ES_PRINCIPAL -> principal,
      ESUserProvider.SPARK_ES_KEYTAB -> keytab,
      ESUserProvider.ES_NET_SPNEGO_AUTH_ELASTICSEARCH_PRINCIPAL -> spnegoPrincipal
    )
  }

  /**
   * Decodes the base64-encoded SSL keystore/truststore content into the Spark
   * task's local temp directory and returns the written file paths.
   *
   * @param esConnect connection metadata carrying the base64-encoded file contents
   * @return (keyStorePath, trustStorePath) on the local filesystem
   * @throws IllegalStateException when LOCAL_USER_DIRS is not set in the environment
   */
  def getSSLFile(esConnect: ESConnect): (String, String) = {

    val localDirs = System.getenv("LOCAL_USER_DIRS")
    // Fail fast with a clear message instead of an NPE when the container
    // environment does not expose LOCAL_USER_DIRS.
    if (localDirs == null || localDirs.trim.isEmpty) {
      throw new IllegalStateException("environment variable LOCAL_USER_DIRS is not set; cannot stage SSL files")
    }
    // When several local dirs are configured, use the first one.
    val local = if (localDirs.contains(",")) localDirs.split(",")(0) else localDirs

    val keyStorePath = s"$local/es_ssl_sink/${esConnect.getKeyStorePath.split("/").last}"
    val trustStorePath = s"$local/es_ssl_sink/${esConnect.getTrustStorePath.split("/").last}"

    // The files arrive base64-encoded from the metadata service; decode to disk.
    FileUtils.decoderBase64File(esConnect.getKeyStoreFile, keyStorePath)
    FileUtils.decoderBase64File(esConnect.getTrustStoreFile, trustStorePath)

    (keyStorePath, trustStorePath)
  }


  /**
   * Shapes the upstream DataFrame to the ES mapping: applies default values
   * and renames (per write mode), then converts each column according to its
   * configured data type.
   *
   * @param data     upstream DataFrame
   * @param esFileds configured ES field mappings
   * @return DataFrame ready to be written to ES
   */
  def handDataFrameType(data: DataFrame, esFileds: List[EsExtractFieldInfo]): DataFrame = {
    // Rename columns and fill defaults; upsert mode has its own rules.
    val prepared =
      if (writeMode.equals("upsert")) setUpsertModeDefaultValue(esFileds, _EsColsInfo, data) // upsert mode
      else setDefaultValue(esFileds, _EsColsInfo, data) // index mode

    val projected = esFileds.map { info =>
      info.data_type.toLowerCase match {
        case "number" =>
          // Numbers are written as strings so float/double precision is not lost in ES.
          col(info.field).cast("string")
        case "object" =>
          // Object columns may contain nulls: sample one non-null JSON value to
          // derive the schema, then parse the column into a struct for ES.
          val nonNullRows = prepared.filter(s"${info.field} is not null")
          if (nonNullRows.isEmpty) {
            col(info.field)
          } else {
            val sampleJson = nonNullRows.select(info.field).take(1)(0).getString(0)
            val ddlSchema = prepared.select(schema_of_json(sampleJson).alias("schema")).take(1)(0).getString(0)
            from_json(col(info.field), DataType.fromDDL(ddlSchema).asInstanceOf[StructType]).as(info.field)
          }
        case _ =>
          col(info.field)
      }
    }

    prepared.select(projected: _*)
  }

  /**
   * Builds a DataFrame with nested struct columns for object-typed ES fields.
   * Target names of form "g:a" become struct g{a}; "g:a:b" becomes g{a{b}}.
   *
   * @param data     DataFrame
   * @param esFileds ES field mappings
   * @return DataFrame with nested columns constructed
   */
  @deprecated
  def structDataFrame(data: DataFrame, esFileds: List[EsExtractFieldInfo]): DataFrame = {
    var df = data
    // Rename columns and apply default values.
    if (writeMode.equals("upsert")) {
      df = setUpsertModeDefaultValue(esFileds, _EsColsInfo, data) // upsert mode
    } else {
      df = setDefaultValue(esFileds, _EsColsInfo, data) // index mode
    }

    // First-level field names, e.g. ListBuffer[String](id, name, g, k)
    val objMap = new ListBuffer[String]()
    // Second-level field lists, e.g. ListBuffer[Any](id, name, ListBuffer(a:b, c), ListBuffer(d, e))
    val filedArr = new ArrayBuffer[Any]()
    for (filed <- esFileds) {
      if (!filed.from_field.trim.equals("")) {
        var filedName = filed.field
        val obj = filedName.split(":")
        if (obj.length == 2) { // field of form g:a
          val objName = obj(0)
          filedName = obj(1)
          if (objMap.contains(objName)) {
            // Object already registered: append the column name to its field list.
            val index = objMap.indexOf(objName)
            filedArr(index).asInstanceOf[ListBuffer[String]] += filedName
          } else {
            // New object: register it and start its field list.
            objMap += objName
            filedArr += new ListBuffer[String]
            filedArr(objMap.indexOf(objName)).asInstanceOf[ListBuffer[String]] += filedName
          }
        } else if (obj.length == 3) {
          // Field of form g:a:b — keep "a:b" as the nested name under g.
          val oneName = obj(0)
          val twoName = obj(1)
          filedName = twoName + ":" + obj(2)

          if (objMap.contains(oneName)) {
            // Object already registered: append the column name to its field list.
            val index = objMap.indexOf(oneName)
            filedArr(index).asInstanceOf[ListBuffer[String]] += filedName
          } else {
            // New object: register it and start its field list.
            objMap += oneName
            filedArr += new ListBuffer[String]
            filedArr(objMap.indexOf(oneName)).asInstanceOf[ListBuffer[String]] += filedName
          }

        } else {
          // Not an object-typed field: keep it as a plain column.
          objMap += filedName
          filedArr += filedName
        }
      }
    }


    import org.apache.spark.sql.functions._

    // Second-level fields to build, e.g. ListBuffer(a, b, e, c)
    val twoList = new ListBuffer[String]
    // Third-level fields to build, e.g. ListBuffer(a, b, ListBuffer(ListBuffer(d)), c)
    val treList = new ListBuffer[Any]
    for (i <- objMap.indices) {
      if (filedArr(i).toString.contains("ListBuffer")) {
        // Fields needing a struct, e.g. ListBuffer(a:b)
        val structFiled: ListBuffer[String] = filedArr(i).asInstanceOf[ListBuffer[String]]
        for (elem <- structFiled) {
          if (elem.contains(":")) {
            val elemArr = elem.split(":")
            if (twoList.contains(elemArr(0)) && treList(twoList.indexOf(elemArr(0))).isInstanceOf[ListBuffer[String]]) {
              treList(twoList.indexOf(elemArr(0))).asInstanceOf[ListBuffer[String]] += elemArr(1)
            } else {
              twoList += elemArr(0)
              val temp = new ListBuffer[String]
              temp += elemArr(1)
              treList += temp
            }
          } else {
            twoList += elem
            treList += elem
          }
        }
      }
    }

    for (i <- treList.indices) {
      val ab = new ArrayBuffer[Column]()
      if (treList(i).toString.contains("ListBuffer")) {
        for (elem <- treList(i).asInstanceOf[ListBuffer[String]]) {
          ab += df.col(elem)
        }
        if (!ab.isEmpty) {
          // Third level built into a struct.
          df = df.withColumn(twoList(i), struct(ab: _*).as(twoList(i)))
        }
      }
    }
    for (i <- objMap.indices) {
      val bc = new ListBuffer[Column]()
      if (filedArr(i).toString.trim.contains("ListBuffer")) {
        for (elem <- filedArr(i).asInstanceOf[ListBuffer[String]]) {
          if (elem.contains(":")) {
            val eleArr = elem.split(":")
            if (!(bc.contains(df.col(eleArr(0))))) {
              bc += df.col(eleArr(0))
            }
          } else {
            bc += df.col(elem)
          }
        }
      }
      // Build the second-level struct.
      if (!bc.isEmpty) {
        df = df.withColumn(objMap(i), struct(bc: _*))
      }
    }
    val colArr = new ArrayBuffer[Column]()
    for (elem <- objMap) {
      colArr += df.col(elem)
    }
    // Keep only the required columns.
    df = df.select(colArr: _*)
    df
  }

  /**
   * Builds the dynamic index name "<db_name>___<datePart>" for one partition
   * value. The value may be a date ("2020-06-18"), a datetime
   * ("2020-06-18T12:00:00") or an epoch-millis timestamp.
   *
   * @param value     partition value rendered as a string
   * @param keepParts how many leading date components to keep (1=year,
   *                  2=year+month, 3=year+month+day)
   * @return the index name, or None when a timestamp does not render as a
   *         dash-separated date (such values are skipped, matching the
   *         original behaviour)
   */
  private def formatDynamicIndex(value: String, keepParts: Int): Option[String] = {
    val datePart: Option[String] =
      if (value.contains("-")) {
        // Datetime values keep only the date part before 'T'.
        if (value.contains("T")) Some(value.split("T")(0)) else Some(value)
      } else {
        // Timestamp (Long, millis); java.sql.Date renders as "yyyy-MM-dd".
        val rendered = new Date(value.toLong).toString
        if (rendered.contains("-")) Some(rendered) else None
      }
    datePart.map(d => _esSinkConfig.db_name + "___" + d.split("-").take(keepParts).mkString(""))
  }

  /**
   * Splits the data into one DataFrame per dynamic (time-partitioned) index.
   *
   * @param fieldAndFormatMap single-entry map: partition column name -> date format
   * @param data              DataFrame already shaped to the ES mapping
   * @return map of dynamic index name -> DataFrame slice belonging to it
   */
  def setDynamicIndex(fieldAndFormatMap: Map[String, String],
                      data: DataFrame) = {
    var res = data
    import org.apache.spark.sql.functions._
    val partitionIndexName = fieldAndFormatMap.head._1
    // Resolve the declared data type of the partition column.
    var data_type = ""
    var splitValues: List[String] = List[String]()
    // Temp column name made unique with a timestamp so it cannot clash.
    val f = System.currentTimeMillis()
    val dateColumn = partitionIndexName + "_" + f
    for (elem <- _esSinkConfig.extract_fields) {
      if (elem.field.equalsIgnoreCase(partitionIndexName)) {
        data_type = elem.data_type
      }
    }
    // Collect the distinct partition values as strings.
    data_type match {
      case ColumnType.DATE | ColumnType.NUMBER => {
        splitValues = data.select(col(partitionIndexName).cast(StringType)).distinct().collect().map(row => {
          row.getAs[String](partitionIndexName).trim
        }).distinct.toList.filter(_ != null)
      }
      case ColumnType.DATETIME => {
        // Datetimes are first truncated to dates in a temp column.
        res = data.withColumn(dateColumn, col(partitionIndexName).cast(DateType))
        splitValues = res
          .select(col(dateColumn).cast(StringType))
          .distinct()
          .collect()
          .map(row => row.getAs[String](dateColumn))
          .distinct.toList.filter(_ != null)
      }
      case _ => {
        throw new Exception("暂不支持该类型字段分区")
      }
    }

    for (elem <- splitValues) {
      logInfo("splitValues: " + elem)
    }

    val format = fieldAndFormatMap.head._2
    // Validate the configured format once, up front (fail-fast as before),
    // and map it to the number of date components to keep.
    val keepParts = format.toUpperCase match {
      case FormatEnum.YYYY => 1
      case FormatEnum.YYYYMM => 2
      case FormatEnum.YYYYMMDD => 3
      case _ => throw new IllegalArgumentException(s"error format: ${format}")
    }

    // Map each partition value to its dynamic index name.
    val splitValueAndFormatIndexMap: mutable.HashMap[String, String] = mutable.HashMap[String, String]()
    for (value <- splitValues) {
      formatDynamicIndex(value, keepParts).foreach(index => splitValueAndFormatIndexMap += (value -> index))
    }

    // Keep only the configured target columns.
    val colArr = new ArrayBuffer[Column]()
    for (elem <- _esSinkConfig.extract_fields) {
      colArr += col(elem.field)
    }

    val indexAndDFMap = new mutable.HashMap[String, DataFrame]()
    data_type match {
      case ColumnType.DATE | ColumnType.NUMBER => {
        for (value <- splitValues) {
          // Nulls were already dropped above.
          val tmpDF = data.filter(col(partitionIndexName) === value)
          indexAndDFMap.put(splitValueAndFormatIndexMap(value), tmpDF)
        }
      }
      case ColumnType.DATETIME => {
        for (value <- splitValues) {
          // Filter on the temp date column, then drop it via the projection.
          val tmpDF = res.filter(col(dateColumn) === value).select(colArr: _*)
          indexAndDFMap.put(splitValueAndFormatIndexMap(value), tmpDF)
        }
      }
      case _ => {
        throw new Exception("暂不支持该类型字段分区")
      }
    }
    indexAndDFMap
  }

  /**
   * Renames, type-converts and default-fills columns for index (non-upsert)
   * writes.
   *
   * @param sinkSchema  field mappings configured for the sink (target field,
   *                    source field, data type, default value)
   * @param columnEntiy column metadata from the data-management service
   * @param data        upstream DataFrame
   * @return DataFrame shaped to the ES mapping, all columns cast to String
   */
  def setDefaultValue(sinkSchema: List[EsExtractFieldInfo],
                      columnEntiy: util.List[DmTableColumn],
                      data: DataFrame): DataFrame = {
    // Build a [columnName -> not_null("true"/"false")] map from the metadata.
    val fieldAndNotNull = columnEntiy.asScala.map(colEntiy => {
      val colName: String = colEntiy.getColName
      var not_null: String = ""
      // OBJECT columns carry no NOT_NULL param in data management.
      if (!colEntiy.getColType.equalsIgnoreCase("OBJECT")) {
        colEntiy.getParams.asScala.foreach(map => {
          map.get("pKey") match {
            case "NOT_NULL" => not_null = map.get("pValue")
            case _ =>
          }
        })
      } else {
        // OBJECT columns are treated as nullable.
        not_null = "false"
      }
      (colName, not_null)
    }).toMap
    // Diagnostics through the logger rather than println/stdout.
    for (elem <- fieldAndNotNull) {
      log.info(elem._1 + "  " + elem._2)
    }
    // Copy only the columns wired to an upstream field, renaming each to its
    // (possibly nested "a:b" / "a:b:c") target leaf name.
    val colArr = new ArrayBuffer[Column]()
    for (ef <- sinkSchema) {
      if (!ef.from_field.trim.equals("")) {
        val to_field: String = ef.field
        val from_field: String = ef.from_field
        if (to_field.contains(":")) {
          val fieldArr = to_field.split(":")
          if (fieldArr.length > 2) {
            colArr += data.col(from_field) as (fieldArr(2))
          } else {
            colArr += data.col(from_field) as (fieldArr(1))
          }
        } else {
          colArr += data.col(from_field) as (to_field)
        }
      }
    }

    // 20200526: fail when nothing is wired AND every default value is empty.
    if (colArr.isEmpty && sinkSchema.forall(schema => "".equals(schema.field_props.default_value))) {
      throw new Exception("作业配置异常,目标表没有连接上游字段,且默认值都为空!")
    }

    var value: DataFrame = data.select(colArr: _*)

    // Cast each column to String with per-type cleanup. Unconnected
    // SERIAL4/SERIAL8 fields yield no column at all; the previous code left a
    // null Column in the list, which NPE'd inside select.
    val cols: List[Column] = sinkSchema.flatMap(ef => {
      var to_field: String = ef.field
      if (to_field.contains(":")) {
        to_field = to_field.split(":")(1)
      }
      val data_type = ef.data_type
      if (!ef.from_field.trim.equals("")) { // wired to an upstream field
        if (data_type.equalsIgnoreCase("TIME")) {
          // TIME values must be trimmed, otherwise the downstream write fails.
          Some(trim(value.col(to_field).cast(StringType)) as to_field)
        } else if (data_type.equalsIgnoreCase("DATETIME")) {
          // Replace the space separator with 'T' for the ES datetime format.
          Some(translate(value.col(to_field).cast(StringType), " ", "T") as to_field)
        } else {
          Some(value.col(to_field).cast(StringType))
        }
      } else { // not wired: materialize as a null String column (except SERIAL types)
        if (!data_type.equalsIgnoreCase("SERIAL4") && !data_type.equalsIgnoreCase("SERIAL8")) {
          Some(lit(null).cast(StringType).as(to_field))
        } else {
          None
        }
      }
    })

    value = value.select(cols: _*)

    // Fill configured default values; a required (not_null) field that is
    // neither wired nor given a default is a configuration error.
    for (ef <- sinkSchema) {
      var to_field: String = ef.field
      if (to_field.contains(":")) {
        to_field = to_field.split(":")(1)
      }
      val from_field: String = ef.from_field
      val default_value: String = ef.field_props.default_value
      val fieldTmp = ef.field

      if (ef.field.contains(":")) {
        // Nested fields: metadata map is keyed by the outer column name.
        if (!default_value.equals("")) {
          value = value.na.fill(default_value, Array(to_field))
        } else if (default_value.equals("") && fieldAndNotNull(fieldTmp.split(":")(0)).equalsIgnoreCase("true") && from_field.trim.equals("")) {
          log.error(s"目标字段：${to_field}不能为null!")
          throw new Exception("目标字段不能为null!")
        }
      } else {
        if (!default_value.equals("")) {
          value = value.na.fill(default_value, Array(fieldTmp))
        } else if (default_value.equals("") && fieldAndNotNull(fieldTmp).equalsIgnoreCase("true") && from_field.trim.equals("")) {
          log.error(s"目标字段：${to_field}不能为null!")
          throw new Exception("目标字段不能为null!")
        }
      }
    }
    value
  }


  /**
   * Renames, type-converts and validates columns for upsert-mode writes.
   * Unlike index mode, unwired fields are not materialized as null columns;
   * an unwired, defaultless, not-null field is a configuration error.
   *
   * @param sinkSchema  field mappings configured for the sink
   * @param columnEntiy column metadata from the data-management service
   * @param data        upstream DataFrame
   * @return DataFrame shaped to the ES mapping, columns cast to String
   */
  def setUpsertModeDefaultValue(sinkSchema: List[EsExtractFieldInfo],
                                columnEntiy: util.List[DmTableColumn],
                                data: DataFrame): DataFrame = {
    // Build a [columnName -> not_null("true"/"false")] map from the metadata.
    val fieldAndNotNull = columnEntiy.asScala.map(colEntiy => {
      val colName: String = colEntiy.getColName
      var not_null: String = ""
      // OBJECT columns carry no NOT_NULL param in data management.
      if (!colEntiy.getColType.equalsIgnoreCase("OBJECT")) {
        colEntiy.getParams.asScala.foreach(map => {
          map.get("pKey") match {
            case "NOT_NULL" => not_null = map.get("pValue")
            case _ =>
          }
        })
      } else {
        // OBJECT columns are treated as nullable.
        not_null = "false"
      }
      (colName, not_null)
    }).toMap
    // Diagnostics through the logger rather than println/stdout.
    for (elem <- fieldAndNotNull) {
      log.info(elem._1 + "  " + elem._2)
    }
    // Copy only the columns wired to an upstream field, renaming each to its
    // (possibly nested "a:b" / "a:b:c") target leaf name.
    val colArr = new ArrayBuffer[Column]()
    for (ef <- sinkSchema) {
      if (!ef.from_field.trim.equals("")) {
        val to_field: String = ef.field
        val from_field: String = ef.from_field
        if (to_field.contains(":")) {
          val fieldArr = to_field.split(":")
          if (fieldArr.length > 2) {
            colArr += data.col(from_field) as (fieldArr(2))
          } else {
            colArr += data.col(from_field) as (fieldArr(1))
          }
        } else {
          colArr += data.col(from_field) as (to_field)
        }
      }
    }

    // 20200526: fail when nothing is wired AND every default value is empty.
    if (colArr.isEmpty && sinkSchema.forall(schema => "".equals(schema.field_props.default_value))) {
      throw new Exception("作业配置异常,目标表没有连接上游字段,且默认值都为空!")
    }

    var value: DataFrame = data.select(colArr: _*)

    // Cast each wired column to String (with per-type cleanup) and validate
    // unwired fields against the not-null metadata.
    for (ef <- sinkSchema) {
      var to_field: String = ef.field
      logInfo("ef.field:" + ef.field)
      if (to_field.contains(":")) {
        to_field = to_field.split(":")(1)
      }
      logInfo("to_field:" + to_field)
      val fieldTmp = ef.field
      val data_type = ef.data_type
      val from_field: String = ef.from_field
      logInfo("from_field:" + from_field)
      val default_value: String = ef.field_props.default_value
      logInfo("default_value:" + default_value)
      if (!from_field.trim.equals("")) { // wired to an upstream field
        if (data_type.equalsIgnoreCase("TIME")) {
          // TIME values must be trimmed, otherwise the downstream write fails.
          value = value.withColumn(to_field, trim(value.col(to_field).cast(StringType)))
        } else if (data_type.equalsIgnoreCase("DATETIME")) {
          // Replace the space separator with 'T' for the ES datetime format.
          value = value.withColumn(to_field, translate(value.col(to_field).cast(StringType), " ", "T").as(to_field))
        } else {
          value = value.withColumn(to_field, value.col(to_field).cast(StringType))
        }
      } else { // not wired
        if (default_value.equals("") && !data_type.equalsIgnoreCase("SERIAL4") && !data_type.equalsIgnoreCase("SERIAL8")) {
          // The metadata map is keyed by the outer column name; split(":")(0)
          // also yields the plain name when there is no colon. (The previous
          // code fell through to a second lookup on the full "outer:inner"
          // key, which is never present in the map and threw
          // NoSuchElementException for nested fields.)
          if (fieldAndNotNull(fieldTmp.split(":")(0)).equalsIgnoreCase("true")) {
            log.error(s"目标字段：${to_field}不能为null!")
            throw new Exception("目标字段不能为null!")
          }
        }
      }
    }
    value
  }


  /**
   * Derives the ES document _id template from the doc_id configuration.
   *
   * @param doc_id JSON object whose head value names the generation rule
   *               ("pattern" / "field" / "uuid") and whose last value holds
   *               the rule's payload
   * @return a placeholder template such as "{a}_{b}" for the "pattern" rule,
   *         the bare field name for the "field" rule, or "" (default rule)
   *         for "uuid", unknown rules, or empty configuration
   */
  def getDocIDByFileds(doc_id: JObject): String = {
    implicit val formats = DefaultFormats
    log.info("开始构建ES唯一标识...")
    val gen_rule_name_value: String = compact(doc_id.obj.head._2)
    val rule_key = doc_id.obj.last._1
    log.info("rule_key ===> " + rule_key)
    // compact() renders a quoted JSON string; strip the surrounding quotes.
    val ruleName = gen_rule_name_value.substring(1, gen_rule_name_value.length - 1)
    log.info("gen_rule_name_value  ==> " + ruleName)
    ruleName match {
      // Rule 1: join the configured fields, each wrapped in braces.
      case "pattern" =>
        val patternRule = doc_id.obj.last._2.extract[PatternRule]
        val fields = patternRule.stitching_fields
        val stitching_mode = patternRule.stitching_mode
        if (fields.size > 0) {
          // For "a:b" / "a:b:c" paths the deepest segment is the placeholder.
          val placeholders = fields.map { field =>
            if (field.contains(":")) {
              val parts = field.split(":")
              if (parts.length > 2) "{" + parts(2) + "}"
              else "{" + parts(1) + "}"
            } else {
              "{" + field + "}"
            }
          }
          if (!stitching_mode.isEmpty) {
            placeholders.mkString(stitching_mode)
          } else {
            log.info("连接符为空,使用默认连接符: _")
            placeholders.mkString("_")
          }
        } else {
          log.info("patter field为空,使用默认规则")
          ""
        }
      // Rule 2: a single field's name, quotes stripped.
      case "field" =>
        val rule_value = compact(doc_id.obj.last._2)
        log.info("rule_value ==> " + rule_value.substring(1, rule_value.length - 1))
        if (!rule_value.isEmpty) {
          rule_value.substring(1, rule_value.length - 1)
        } else {
          log.info("field为空,使用默认规则")
          ""
        }
      // Rule 3: let ES generate the id.
      case "uuid" =>
        ""
      case _ =>
        log.info("未知doc_id类型,使用默认规则")
        ""
    }
  }

  /**
   * Maps a Khaos column type constant to the Spark SQL type name used by the
   * ES connector. Supported Spark names include: string, boolean, byte, short,
   * int, long, float, double, decimal, date, timestamp.
   *
   * @param dataType Khaos column type (see [[ColumnType]])
   * @return the corresponding Spark SQL type name; "string" for unknown types
   */
  @deprecated("没有使用的方法")
  def getDataType(dataType: String): String = dataType match {
    // The match is the method body — no mutable null-initialized local needed.
    case ColumnType.STRING => "string"
    case ColumnType.NUMBER => "int"
    case ColumnType.DATE => "date"
    case ColumnType.DECIMAL => "float"
    case ColumnType.TIME => "string"
    case ColumnType.DATETIME => "timestamp"
    case _ => "string"
  }

  /**
   * Extracts the placeholder names from a doc_id pattern such as "{a}_{b}".
   *
   * @param docIdPattern pattern string containing "{name}" placeholders
   * @return the placeholder names in order of appearance
   */
  def resolveVars(docIdPattern: String): ArrayBuffer[String] = {
    log.info(" ==> start resolve doc_id")
    // Non-greedy match from each '{' to the next '}'; (?s) lets a placeholder
    // span line breaks, matching the old indexOf-based scan. Unlike the old
    // substring loop this never throws on malformed input (an unmatched '{'
    // or a '}' before the first '{' used to raise StringIndexOutOfBounds);
    // such fragments are simply skipped.
    val placeholder = """(?s)\{(.*?)\}""".r
    val nestedStrings = ArrayBuffer[String]()
    placeholder.findAllMatchIn(docIdPattern).foreach(m => nestedStrings += m.group(1))
    log.info("doc id => " + nestedStrings)
    nestedStrings
  }

  /**
   * Checks whether the doc_id pattern references a field that is missing from
   * the DataFrame schema (comparison is case-insensitive).
   *
   * @param docIdPattern   doc_id pattern containing "{name}" placeholders
   * @param schemaColNames column names of the DataFrame to be written
   * @return true when at least one placeholder has no matching column
   */
  def hasNotValidField(docIdPattern: String, schemaColNames: Array[String]): Boolean = {
    // Resolve the field names referenced by the _id pattern.
    val idFields = resolveVars(docIdPattern)
    // Lowercase the schema names once, into a Set for O(1) lookups.
    val lowerSchemaColNames = schemaColNames.map(_.toLowerCase).toSet
    // A locally built collection can never be null — the old null check was dead.
    val notInSchemaFields = idFields.filterNot(field => lowerSchemaColNames.contains(field.toLowerCase))
    if (notInSchemaFields.nonEmpty) {
      log.error("the _id has some columns not in dataframe schema,please check.")
      log.error("not in given schema columns:[" + notInSchemaFields.mkString(",") + "]")
      true
    } else {
      false
    }
  }
}