package com.kingsoft.dc.khaos.module.spark.sink

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.check.ESCheck
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.module.spark.metadata.sink._
import com.kingsoft.dc.khaos.module.spark.util.{CheckUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.elasticsearch.spark.sql.DefaultSource15
import org.json4s.{DefaultFormats, JsonAST}

/**
 * Sink strategy that uploads data-quality check (检核) results to Elasticsearch
 * via the elasticsearch-hadoop connector, and optionally fails the job when the
 * check configuration requests a hard stop.
 */
class ESCheckSink extends SinkStrategy with Logging {
  private var _kc: KhaosContext = null
  private var _esSinkConfig: ESSinkConfig = null
  // Supported save modes; only "append" is ever looked up below, with Append as fallback.
  private val writeModeMap = Map("append" -> SaveMode.Append,
    "overwrite" -> SaveMode.Overwrite)

  // ES connection/target metadata resolved by CheckUtils; set in initEsConf().
  private var checkInfo: ESCheck = null

  /**
   * Writes the check-result DataFrame to Elasticsearch.
   *
   * @param kc        shared Khaos runtime context (provides SparkSession and job conf)
   * @param module_id id of the module producing this sink call (unused here)
   * @param config    raw JSON config, extracted into [[ESSinkConfig]]
   * @param schema    inner schema descriptor (unused here)
   * @param dataSet   rows to report; an empty DataFrame skips the ES upload
   * @throws Exception when conf "stop_check" is "true" (case-insensitive), to
   *                   deliberately fail the sync job after reporting.
   */
  override def sink(kc: KhaosContext,
                    module_id: String,
                    config: JsonAST.JObject,
                    schema: Schema,
                    dataSet: DataFrame): Any = {
    // 数据为空,则不上报es — skip the upload when there is nothing to report.
    // head(1) avoids the full DataFrame -> RDD conversion (row deserialization)
    // that dataSet.rdd.isEmpty() would trigger just to test emptiness.
    if (dataSet.head(1).nonEmpty) {
      // Parse the sink config from JSON.
      implicit val formats = DefaultFormats
      val esSinkConfig = config.extract[ESSinkConfig]
      _esSinkConfig = esSinkConfig
      _kc = kc

      // Load the ES connector settings (nodes/port/resource).
      val esConfig = initEsConf()
      val sqlContext = kc.sparkSession.sqlContext
      val ds: DefaultSource15 = new DefaultSource15
      log.info("开始上传检核结果到ES")

      ds.createRelation(sqlContext,
        writeModeMap.getOrElse("append", SaveMode.Append),
        esConfig,
        dataSet)
    }
    // Null-safe comparison: "true".equalsIgnoreCase(x) tolerates x == null,
    // whereas the reversed order would NPE if "stop_check" resolves to null.
    if ("true".equalsIgnoreCase(kc.conf.getString("stop_check"))) {
      // Fail the job on purpose: too many bad rows were reported to ES.
      throw new Exception(s"检核上报ES的异常数据条数超出用户设置的最大值，同步任务执行失败!")
    }
  }

  /**
   * Builds the elasticsearch-hadoop configuration map.
   *
   * Cluster nodes are expected as "host1:port1,host2:port2,..."; hosts are
   * joined into "es.nodes" and the port of the FIRST entry becomes "es.port"
   * (assumes all nodes share one port — NOTE(review): confirm upstream).
   *
   * @return immutable Map of es.* settings; "es.resource" ("index/type") is
   *         included only when both index and type names are present.
   */
  def initEsConf() = {
    // Resolve ES physical address and target index/type metadata.
    checkInfo = CheckUtils.getESCheckInfo(_kc,
      "com.kingsoft.dc.khaos.extender.meta.impl.DmCheckImpl")

    val index_name = checkInfo.getDbName
    val type_name = checkInfo.getTableName
    val clusterNodes = checkInfo.getClusterNodes

    val nodeArr = clusterNodes.split(",")
    // Keep only the host part of each "host:port" entry.
    val nodes = nodeArr.map(_.split(":")(0)).mkString(",")

    val baseConfig = Map(
      "es.nodes.wan.only" -> "true",
      "es.nodes" -> nodes,
      "es.port" -> nodeArr(0).split(":")(1))

    if (index_name != null && type_name != null) {
      baseConfig + ("es.resource" -> s"${index_name}/${type_name}")
    } else {
      log.error("can't found index and type config")
      baseConfig
    }
  }
}