package com.kingsoft.dc.khaos

import java.text.SimpleDateFormat
import java.util.Date
import java.util.concurrent.atomic.AtomicBoolean

import com.kingsoft.dc.khaos.dsl.utils.UdfUtils
import com.kingsoft.dc.khaos.innertype.Schema
import com.kingsoft.dc.khaos.metadata.{Dependency, ModuleCommon}
import com.kingsoft.dc.khaos.scheduler._
import com.kingsoft.dc.khaos.util._
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal

/**
  * Created by chengguangqing on 2019/5/27.
  */
/**
  * Runtime context of a Khaos application: owns the configuration, the Spark
  * session, the inter-module channels and the scheduler hierarchy. All
  * initialization happens in the constructor body at the bottom of the class.
  */
class KhaosContext(config: KhaosConf) extends Logging {

  // Guards against double-stop; flips to true on the first stop() call.
  private val stopped: AtomicBoolean = new AtomicBoolean(false)

  // Channels used to pass data sets and schemas between pipeline modules.
  private var _dataSetChannel: DataSetChannel = _
  private var _schemaChannel: SchemaChannel = _

  // Channel for inter-module messages (20200630, e.g. telling redis to delete
  // already-synced data).
  var _valuesChannel: ValuesChannel = _

  // Engine configuration and environment.
  private var _conf: KhaosConf = _
  private var _env: KhaosEnv = _

  // Spark runtime handles. NOTE(review): _sparkConf and _sparkContext are never
  // assigned anywhere in this class; only _sparkSession is initialized here.
  private var _sparkConf: SparkConf = _
  private var _sparkContext: SparkContext = _
  private var _sparkSession: SparkSession = _

  // Scheduler hierarchy: KhaosScheduler -> JobScheduler -> TaskScheduler.
  private var _khaosScheduler: KhaosScheduler = _
  private var _jobScheduler: JobScheduler = _
  private var _taskScheduler: TaskScheduler = _

  private var _applicationId: String = _

  private var _moduleConfList: List[ModuleCommon] = _

  // Accessors.
  def conf: KhaosConf = _conf
  def env: KhaosEnv = _env

  def sparkConf: SparkConf = _sparkConf

  /** Returns the SparkSession, lazily initializing Spark on first access. */
  def sparkSession: SparkSession = {
    if (_sparkSession == null) {
      initSpark()
    }
    _sparkSession
  }

  /**
    * Resolves the schema and data set produced by an upstream dependency.
    *
    * @throws IllegalStateException if either lookup fails; the cause is chained
    *                               and its message/stack trace embedded.
    */
  def structData[T](dependency: Dependency): (Schema, T) = {
    Try {
      // Temporary workaround: look the schema up by source node only.
      // Original: schemaChannel.getSchema(dependency)
      (schemaChannel.getSchema(Dependency(dependency.sourceNode)),
        dataSetChannel.getDataSet[T](dependency))
    } match {
      case Success(structData) =>
        structData
      case Failure(e) =>
        // getStackTraceString was deprecated and removed in Scala 2.13; build
        // the trace text from the raw frames instead, and keep e as the cause.
        throw new IllegalStateException(
          s"""
             |${e.getMessage}
             |${e.getStackTrace.mkString("\n")}
           """.stripMargin, e)
    }
  }

  /** Convenience overload: resolve a data set by table name only. */
  def structData[T](tableName: String): (Schema, T) = {
    structData[T](Dependency(null, null, null, null, tableName))
  }

  def khaosScheduler: KhaosScheduler = _khaosScheduler
  def jobScheduler: JobScheduler = _jobScheduler
  def taskScheduler: TaskScheduler = _taskScheduler

  def applicationId: String = _applicationId

  def dataSetChannel: DataSetChannel = _dataSetChannel
  def schemaChannel: SchemaChannel = _schemaChannel

  /** Installs the data-set channel; returns this for chaining. */
  def addDataSetChannel(dataSetChannel: DataSetChannel): this.type = {
    _dataSetChannel = dataSetChannel
    this
  }

  /** Installs the schema channel; returns this for chaining. */
  def addSchemaChannel(schemaChannel: SchemaChannel): this.type = {
    _schemaChannel = schemaChannel
    this
  }

  /** Initializes the Spark environment: session plus global accumulator. */
  def initSpark(): Unit = {
    addSparkSession()
    addAbnormalAccumulator()
  }

  /**
    * Builds the SparkSession and registers the project UDFs on it.
    * Returns this for chaining.
    */
  def addSparkSession(): this.type = {
    // job.run.env is read for its fail-fast effect only; the prod/test flag it
    // used to derive fed a session builder that has since been removed.
    conf.getString("job.run.env")
    _sparkSession = getSparkSession()
    // Register user-defined functions on the new session.
    UdfUtils.registerUDF(_sparkSession)
    this
  }

  /**
    * Creates (or reuses) a SparkSession with Hive support, applying any
    * user-supplied engine options after filtering the configured blacklist.
    */
  def getSparkSession(): SparkSession = {
    val sparkConf = new SparkConf()

    val optParamsJson = _conf.getString(KhaosConstants.KHAOS_USER_OPTPARAMS, null)
    val notAllowSetConfigs: Array[String] = _conf.getString(KhaosConstants.KHAOS_USER_ALLOWSET_BLACKLIST).split(",")
    log.info(s"useSetParams:$optParamsJson")
    log.info(s"notAllowSetConfigs:${notAllowSetConfigs.mkString("[", ",", "]")}")

    if (optParamsJson != null) {
      val finalSetParams: collection.Map[String, String] =
        KhaosUtils.getSparkEngineConfigs(optParamsJson, notAllowSetConfigs)
      log.info(s"finalSetSparkConf:$finalSetParams")
      sparkConf.setAll(finalSetParams)
      // Consume the one-shot user options so later phases do not re-apply them.
      _conf.remove(KhaosConstants.KHAOS_USER_OPTPARAMS)
      _conf.remove(KhaosConstants.KHAOS_USER_ALLOWSET_BLACKLIST)
    }

    // TODO verify that values set through plugin.properties take precedence
    // over the settings below.
    SparkSession.builder().appName(_conf.getString("khaos.app.name"))
      .enableHiveSupport()
      .config("hive.metastore.dml.events", conf.getString("khaos.hive.metastore.dml.events", "false"))
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.max.dynamic.partitions", "5000")
      .config("hive.exec.max.dynamic.partitions.pernode", "2000")
      .config(sparkConf)
      .getOrCreate()
  }

  /**
    * Registers a global accumulator counting ignored abnormal records and
    * publishes it on the values channel for other modules to use.
    */
  def addAbnormalAccumulator(): LongAccumulator = {
    val khaos_abnormal_data_num: LongAccumulator =
      _sparkSession.sparkContext.longAccumulator("khaos_abnormal_data_num")
    _valuesChannel.emit(KhaosConstants.KHAOS_ABNORMALACCUMULATOR, khaos_abnormal_data_num)
    khaos_abnormal_data_num
  }

  /**
    * Parses the job definition JSON (khaos.app.json) and populates _conf and
    * _moduleConfList from it.
    *
    * @throws Exception if khaos.app.json is missing or empty
    */
  def initJson(): Unit = {
    val jsonConf = _conf.getString("khaos.app.json", "")
    if ("".equals(jsonConf)) {
      throw new Exception("required config khaos.app.json is missing or empty")
    }

    val config = new AppConfigParseKsyun(_conf).parseJson(jsonConf)
    _moduleConfList = config._5
    // App name encodes the parsed identifiers plus a launch timestamp.
    _conf.set("khaos.app.name",
      "khaos_" + config._1 + "_" + config._2 + "_" + new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()))
    _conf.set("khaos.app.priority", config._3)
    val commonEnv = config._4
    // Copy the common environment into _conf, normalizing each key.
    commonEnv.foreach { case (key, value) =>
      _conf.set(KhaosUtils.transformConfigKey(key), value.toString)
    }

    // Apply user-supplied plugin options; the null check keeps backwards
    // compatibility with historical jobs that do not send them.
    val optParamsJson = _conf.getString(KhaosConstants.KHAOS_USER_OPTPARAMS, null)
    val notAllowSetConfigs: Array[String] = _conf.getString(KhaosConstants.KHAOS_USER_ALLOWSET_BLACKLIST).split(",")
    if (optParamsJson != null) {
      val finalPluginSetParams: collection.Map[String, String] =
        KhaosUtils.getPluginsConfigs(optParamsJson, notAllowSetConfigs)
      log.info(s"finalPluginSetParams$finalPluginSetParams")
      _conf.setAll(finalPluginSetParams)
    }
  }

  /** Copies khaos.* JVM system properties into _conf unless already set. */
  def loadSystemEnv(): Unit = {
    for ((k, v) <- System.getProperties.asScala.toMap[String, String] if k.startsWith("khaos.")) {
      if (!_conf.contains(k)) {
        _conf.set(k, v)
      }
    }
  }

  /** Builds the KhaosEnv from the application id, config and module list. */
  def createKhaosEnv(conf: KhaosConf): KhaosEnv = {
    KhaosEnv.createEnv(_applicationId, conf, _moduleConfList)
  }

  /** Runs the whole application through the top-level scheduler. */
  def runApp(): Unit = {
    // The previous try/catch only rethrew the exception, so it was dropped;
    // failures propagate to the caller unchanged.
    khaosScheduler.run(env._dittoInfo)
  }

  // --- Context initialization (runs on construction) ---
  try {
    _conf = config
    loadSystemEnv()
    _conf.validateSettings()
    initJson()

    _schemaChannel = new SchemaChannel()
    _dataSetChannel = new DataSetChannel()
    _valuesChannel = new ValuesChannel()

    _taskScheduler = new TaskSchedulerSparkImple(this, 3)
    _jobScheduler = new JobScheduler(this)
    _khaosScheduler = new KhaosScheduler(this)

    _env = createKhaosEnv(_conf)
    KhaosEnv.set(_env)

    // NOTE(review): flagged "有问题" (problematic) in the original — the
    // application id resolution here may be unreliable; confirm with the
    // TaskScheduler implementation.
    _applicationId = _taskScheduler.applicationId().toString
    _conf.set("khaos.app.id", _applicationId)
  } catch {
    case NonFatal(e) =>
      log.error("Error initializing KhaosContext." + e.toString)
      try {
        stop()
      } catch {
        case NonFatal(inner) =>
          log.error("Error stopping SparkContext after init error.", inner)
      } finally {
        throw e
      }
  }

  /**
    * Stops all components and threads. Idempotent: only the first call does the
    * work. Fixes the original implementation, which unconditionally dereferenced
    * the never-assigned _sparkContext (a guaranteed NullPointerException, also
    * triggered from the init failure path above) and consulted the stop flag
    * only after stopping.
    */
  def stop(): Unit = {
    if (!stopped.compareAndSet(false, true)) {
      logInfo("KhaosContext already stopped")
    } else {
      // Stop whichever Spark handle was actually initialized; both may still be
      // null when initialization failed before Spark was created.
      if (_sparkSession != null) {
        _sparkSession.stop()
      }
      if (_sparkContext != null) {
        _sparkContext.stop()
      }
    }
  }
}
