package com.kingsoft.dc.khaos.scheduler

import java.util.concurrent.atomic.AtomicInteger

import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.metadata.ModuleCommon
import com.kingsoft.dc.khaos.optimize.DagOptimize
import com.kingsoft.dc.khaos.util.Logging
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

/**
 * Created by chengguangqing on 2019/5/28.
 */
/**
 * Schedules jobs for a Khaos application: topologically sorts the module DAG
 * (Kahn's algorithm), appends framework-level helper modules (cleanup /
 * offset-commit / metrics operators) and hands the ordered list to the
 * [[KhaosScheduler]].
 *
 * Created by chengguangqing on 2019/5/28.
 */
class JobScheduler(private val kc: KhaosContext) extends Logging {

  // Injected after construction via setKhaosScheduler.
  var _khaosScheduler: KhaosScheduler = null
  // Flags toggled while scanning module configs in moduleSort; they decide
  // which extra cleanup/commit modules runJob appends to the sorted plan.
  var _deleteModule: Boolean = false
  var _deleteModule_redis: Boolean = false
  var _deleteModule_kafka: Boolean = false

  def setKhaosScheduler(khaosScheduler: KhaosScheduler): Unit = {
    _khaosScheduler = khaosScheduler
  }

  // Job-id generator; also serves as a counter of all jobs ever submitted.
  private[scheduler] val nextJobId = new AtomicInteger(0)

  private[scheduler] def numTotalJobs: Int = nextJobId.get()

  /**
   * Orders modules for execution using Kahn's topological-sort algorithm.
   *
   * While indexing the modules it also inspects each module's configuration
   * and sets the `_deleteModule*` flags / `kc.conf` tags that [[runJob]]
   * consumes later (side effects preserved from the original design).
   *
   * @param adj     mutable map of module id -> list of ids it depends on.
   *                NOTE: consumed destructively — entries are removed as
   *                modules are scheduled.
   * @param modules all modules of the job
   * @return modules in a dependency-respecting execution order
   * @throws IllegalStateException if the dependency graph contains a cycle
   *                               (a full pass makes no progress)
   */
  def moduleSort(adj: collection.mutable.Map[String, List[String]],
                 modules: List[ModuleCommon]): List[ModuleCommon] = {

    // Ids already placed in the execution order (Set: O(1) membership,
    // `contains` on the original ListBuffer was O(n) per lookup).
    val scheduledIds = mutable.Set.empty[String]
    val result = new ListBuffer[ModuleCommon]()

    // Index modules by id. As a side effect, inspect each config and raise
    // the flags that drive the extra modules appended in runJob.
    // NOTE(review): `indexOf(...) > 0` (not >= 0) assumes the pattern never
    // sits at index 0 — moduleConf is JSON, so this looks safe; kept as-is.
    val moduleMap = modules.map { mc =>
      if (mc.clazzType.equals("source") && mc.moduleConf.indexOf(".MppSource") > 0) {
        _deleteModule = true
      } else if (mc.clazzType.equals("source") && mc.moduleConf.indexOf(".RedisSource") > 0) {
        implicit val formats = DefaultFormats
        val parser = parse(mc.moduleConf, true)
        // advanced_options.delete_keys.on_off: delete already-synced keys?
        // Missing/malformed field means "off".
        val on_off = try {
          (((parser \\ "advanced_options") \\ "delete_keys") \\ "on_off").extract[Boolean]
        } catch {
          case _: Exception => false
        }
        if (on_off)
          _deleteModule_redis = true
      } else if (mc.clazzType.equals("source") && mc.moduleConf.indexOf(".KafkaSource") > 0) {
        _deleteModule_kafka = true
      } else if (mc.clazzType.equals("sink") && mc.moduleConf.indexOf(".ESSink") > 0) { //TODO v2.0: for cos/ks3/hdfs->es, force-cast cos float fields to string to avoid precision loss when writing to ES
        kc.conf.set("judge_essink_tag", "true")
      } else if (mc.clazzType.equals("sink") && mc.moduleConf.indexOf(".MppSink") > 0) { //TODO v2.0: pollutes the khaos framework; for cos/ks3/hdfs->mpp, cast cos float types to decimal to avoid precision loss
        kc.conf.set("judge_mppsink_tag", "true")
      } else if (mc.clazzType.equals("sink") && mc.moduleConf.indexOf(".PhoenixSink") > 0) { //TODO v2.0: pollutes the khaos framework; for cos/ks3/hdfs->phoenix, cast cos float types to decimal to avoid precision loss
        kc.conf.set("judge_phoenixsink_tag", "true")
      }
      (mc.id, mc)
    }.toMap

    // Total node count, captured before any removal from adj.
    val totalModules = adj.size

    // Seed with the DAG heads (modules with no dependencies).
    val heads = adj.collect { case (id, deps) if deps.isEmpty => id }.toList
    heads.foreach { id =>
      scheduledIds += id
      result += moduleMap(id)
      adj -= id
    }

    // Repeatedly schedule every module whose dependencies are all satisfied.
    // Collect the ready ids FIRST, then mutate adj — removing entries from a
    // mutable Map while iterating it is undefined behavior.
    while (scheduledIds.size != totalModules) {
      val ready = adj.collect {
        case (id, deps) if deps.forall(scheduledIds.contains) => id
      }.toList

      // No progress in a full pass means the remaining nodes form a cycle.
      // (The original code only had a placeholder comment here and would
      // loop forever on a cyclic graph.)
      if (ready.isEmpty) {
        throw new IllegalStateException(s"THE CIRCLE ITEM IS: ${adj.mkString(";")}")
      }

      ready.foreach { id =>
        scheduledIds += id
        result += moduleMap(id)
        adj -= id
      }
    }
    result.toList
  }

  /**
   * Submits a job: sorts the modules, appends framework helper modules and
   * forwards the plan to the registered [[KhaosScheduler]].
   *
   * @param applicationId owning application (currently unused here)
   * @param modules       modules to run; empty list short-circuits to
   *                      `listener.applicationSucceeded()`
   * @param listener      callback notified about application lifecycle
   */
  def runJob(applicationId: Int,
             modules: List[ModuleCommon],
             listener: ApplicationListener): Unit = {
    if (modules.isEmpty) {
      listener.applicationSucceeded()
      return
    }

    val jobId = nextJobId.incrementAndGet()

    // In-degree (dependency) info for every module.
    val adj = new mutable.HashMap[String, List[String]]()
    modules.foreach { mc =>
      adj += (mc.id -> mc.dependence)
    }

    // Dependency-respecting execution order (also sets the _deleteModule*
    // flags consumed below).
    var moduleSorted = moduleSort(adj, modules)

    // Out-degree (downstream output) info for every module.
    val adjOut = new mutable.HashMap[String, ListBuffer[String]]()
    modules.foreach { mc =>
      val out = new ListBuffer[String]()
      mc.degreeOut.foreach { x =>
        out += x
      }
      adjOut += (mc.id -> out)
    }

    // Append the delete operator for cos/mpp sources. //optimize //21202F2938212B3E22272626252E434D
    if (_deleteModule) {
      moduleSorted = moduleSorted :+ DagOptimize.appendDeleteModuleWithCos(kc)
    }
    // Redis: delete the keys that have already been synced.
    if (_deleteModule_redis) {
      moduleSorted = moduleSorted :+ DagOptimize.appendDeleteModuleWithRedis(kc)
    }
    // Kafka: commit consumed offsets to MySQL.
    if (_deleteModule_kafka) {
      moduleSorted = moduleSorted :+ DagOptimize.appendOffsetModuleWithKafka(kc)
    }
    // Metrics-collection operator is always appended last
    // (the original guarded this with a dead `if (true)`).
    moduleSorted = moduleSorted :+ DagOptimize.appendCollectModuleWithAppEnd(kc)

    //TODO: enrich source modules with table information

    _khaosScheduler.jobSubmitted(jobId, moduleSorted, adjOut, listener)
  }

  // TODO: job cancellation is not implemented yet.
  def cancelJob(jobId: Int): Unit = {}
}

