package com.edata.bigdata.basic

import com.edata.bigdata.annotations.{Edata_Loader, Edata_Saver, Edata_Sink, Edata_Source}
import com.edata.bigdata.flink.{Sink, Source}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.thrift.protocol.TCompactProtocol

import scala.collection.mutable
import scala.reflect.ClassTag


/**
 * Runtime manager for EData jobs.
 *
 * Holds the (lazily created) Spark and Flink execution contexts and instantiates
 * Loader/Saver (Spark) and Source/Sink (Flink) implementations by reflection,
 * using the registries built by [[Manager.apply]].
 */
class Manager {

  /** Application name registered with the Spark / Flink runtime. */
  var APPNAME: String = "EDATA"

  /*
   * ----------------- Spark as the compute engine -----------------
   * 1. local             : local mode, single thread, no parallelism
   * 2. local[k]          : local mode using k worker threads
   * 3. local[*]          : local mode, one thread per available CPU core
   * 4. spark://host:port : standalone mode, Spark schedules its own resources
   *                        (usually a single master)
   * 5. mesos://host:port : resources scheduled by Mesos
   * 6. yarn              : resources scheduled by YARN; "cluster" vs "client"
   *                        is chosen by the --deploy-mode parameter
   *    6.1 cluster : common in production; scheduling and computation both run on the cluster
   *    6.2 client  : the Spark driver and ApplicationMaster run on the local machine,
   *                  while tasks run on the cluster
   *
   * ----------------- Flink as the compute engine -----------------
   */
  var MASTER: String = "local[*]"

  /*
   * 1. Spark standalone mode:
   *    1.1 When debugging through an IDE connected to a remote Spark cluster, the driver
   *        runs on the client (the IDE's host). Provide the driver's host/port so workers
   *        can reach it, e.g.:
   *        CONFIG.put("spark.driver.host", "192.168.36.1"); CONFIG.put("spark.driver.port", "7076")
   */
  var CONFIG: mutable.HashMap[String, String] = new mutable.HashMap[String, String]()
  var SESSION: SparkSession = null
  var flinkENV: StreamExecutionEnvironment = _
  var flinkConf: Configuration = new Configuration()
  // Registries: logical type name -> fully-qualified implementation class name.
  var loaders: mutable.HashMap[String, String] = _
  var savers: mutable.HashMap[String, String] = _
  var sources: mutable.HashMap[String, String] = _
  var sinks: mutable.HashMap[String, String] = _
  var algorithms: mutable.HashMap[String, String] = _

  /** Auxiliary constructor wiring all registries at once (used by [[Manager.apply]]). */
  def this(
            loaders: mutable.HashMap[String, String],
            savers: mutable.HashMap[String, String],
            sources: mutable.HashMap[String, String],
            sinks: mutable.HashMap[String, String],
            algorithms: mutable.HashMap[String, String]
          ) {
    this()
    this.loaders = loaders
    this.savers = savers
    this.sources = sources
    this.sinks = sinks
    this.algorithms = algorithms
  }

  /**
   * Builds a SparkSession from APPNAME / MASTER / CONFIG.
   *
   * The key "spark.serializer" is applied like any other setting AND additionally
   * registers the Thrift TCompactProtocol class with Kryo. (Bug fix: the original
   * only registered the Kryo class and dropped the serializer setting itself, so
   * the configured serializer was never actually enabled.)
   */
  def createSparkSession(): SparkSession = {
    val sparkConf = new SparkConf
    val builder = SparkSession.builder().appName(APPNAME).master(MASTER) //.enableHiveSupport()
    for ((k, v) <- CONFIG) {
      sparkConf.set(k, v)
      if (k == "spark.serializer") {
        sparkConf.registerKryoClasses(Array[Class[_]](classOf[TCompactProtocol]))
      }
    }
    builder.config(sparkConf).getOrCreate()
  }

  /** Creates the Flink streaming environment, applying [[flinkConf]], and caches it. */
  def createFlinkEnvironment(): StreamExecutionEnvironment = {
    flinkENV = StreamExecutionEnvironment.getExecutionEnvironment
    flinkENV.configure(flinkConf)
    flinkENV
  }

  /**
   * Instantiates the Loader registered under `loaderType` and attaches the shared
   * SparkSession (created lazily on first use).
   *
   * Exits the JVM when the type is unknown — preserved from the original design,
   * but with a non-zero status so the failure is visible to the caller's shell.
   */
  def createLoader(loaderType: String): Loader = {
    loaders.get(loaderType) match {
      case Some(className) =>
        // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance().
        val loader = Class.forName(className)
          .getDeclaredConstructor()
          .newInstance()
          .asInstanceOf[Loader]
        if (SESSION == null) {
          SESSION = createSparkSession()
        }
        loader.session = SESSION
        loader
      case None =>
        Manager.LOGGER.warn(s"${loaderType} do not exist")
        System.exit(1) // non-zero: an unknown loader type is an error, not success
        null // unreachable
    }
  }

  /**
   * Instantiates the Saver registered under `saverType` and attaches the shared
   * SparkSession (created lazily on first use). Exits the JVM on an unknown type.
   */
  def createSaver(saverType: String): Saver = {
    savers.get(saverType) match {
      case Some(className) =>
        val saver = Class.forName(className)
          .getDeclaredConstructor()
          .newInstance()
          .asInstanceOf[Saver]
        if (SESSION == null) {
          SESSION = createSparkSession()
        }
        saver.session = SESSION
        saver
      case None =>
        Manager.LOGGER.warn(s"${saverType} do not exist")
        System.exit(1)
        null // unreachable
    }
  }

  /**
   * Instantiates the Flink Source registered under `sourceType`, passing the
   * TypeInformation derived from T to its (single expected) constructor, and
   * attaches the shared Flink environment. Exits the JVM on an unknown type.
   *
   * @tparam T element type produced by the source
   */
  def createSource[T: ClassTag](sourceType: String): Source[T] = {
    sources.get(sourceType) match {
      case Some(className) =>
        val runtimeClass = implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]
        val typeInfo = TypeInformation.of(runtimeClass)
        // Assumes the implementation exposes exactly one constructor taking a
        // TypeInformation[T] — TODO confirm against the Source implementations.
        val source = Class.forName(className)
          .getConstructors()(0)
          .newInstance(typeInfo)
          .asInstanceOf[Source[T]]
        if (flinkENV == null) {
          flinkENV = createFlinkEnvironment()
        }
        source.env = flinkENV
        source
      case None =>
        Manager.LOGGER.warn(s"${sourceType} do not exist")
        System.exit(1)
        null // unreachable
    }
  }

  /**
   * Instantiates the Flink Sink registered under `sinkType` and attaches the shared
   * Flink environment. Exits the JVM on an unknown type.
   *
   * NOTE(review): unlike createSource (which passes a TypeInformation), the sink
   * constructor receives the ClassTag itself — preserved as-is, but confirm this
   * asymmetry is intentional against the Sink implementations.
   *
   * @tparam T element type consumed by the sink
   */
  def createSink[T: ClassTag](sinkType: String): Sink[T] = {
    sinks.get(sinkType) match {
      case Some(className) =>
        val tag = implicitly[ClassTag[T]]
        val sink = Class.forName(className)
          .getConstructors()(0)
          .newInstance(tag)
          .asInstanceOf[Sink[T]]
        if (flinkENV == null) {
          flinkENV = createFlinkEnvironment()
        }
        sink.env = flinkENV
        sink
      case None =>
        Manager.LOGGER.warn(s"${sinkType} do not exist")
        System.exit(1)
        null // unreachable
    }
  }

  /** Stops the SparkSession if one was created (the original NPE'd when SESSION was null). */
  def stop(): Unit = {
    if (SESSION != null) {
      SESSION.stop()
    }
  }
}

/**
 * Companion of [[Manager]]: builds the registries by probing a fixed list of known
 * implementation classes for the EData annotations. Classes missing from the
 * classpath are skipped with a warning, so optional integrations degrade gracefully.
 */
object Manager {
  @transient lazy val LOGGER: Logger = Logger.getLogger(this.getClass)

  /**
   * Constructs a fully-wired [[Manager]].
   *
   * @param clazz caller's class; currently unused but kept for interface stability
   */
  def apply[T](clazz: Class[T]): Manager = {
    val classLoader = this.getClass.getClassLoader

    // Probes each candidate class and maps annotation target -> class name.
    // `context` only affects the log message ("Spark" or "Flink").
    def register(candidates: Array[String],
                 analyze: (ClassLoader, String) => String,
                 context: String): mutable.HashMap[String, String] = {
      val registry = new mutable.HashMap[String, String]()
      candidates.foreach { candidate =>
        val name = analyze(classLoader, candidate)
        if (name != null && !name.equals("")) {
          LOGGER.info(s"loading class ${candidate} to EData's ${context} Context ")
          registry.put(name, candidate)
        }
      }
      registry
    }

    /* Loader & Saver (Spark side) */
    val loaders = register(
      Array(
        "com.edata.bigdata.postgres.PgLoader",
        "com.edata.bigdata.nebula.NeLoader"
      ),
      loader_analyze, "Spark")

    val savers = register(
      Array(
        "com.edata.bigdata.postgres.PgSaver",
        "com.edata.bigdata.nebula.NeSaver"
      ),
      saver_analyze, "Spark")

    /* Source & Sink (Flink side) */
    val sources = register(
      Array(
        "com.edata.bigdata.flink.kafka.FKSource"
      ),
      source_analyze, "Flink")

    val sinks = register(
      Array(
        "com.edata.bigdata.flink.postgres.FPSink"
      ),
      sink_analyze, "Flink")

    // No algorithm implementations are registered yet.
    val algorithms = new mutable.HashMap[String, String]()
    new Manager(loaders,
      savers,
      sources,
      sinks,
      algorithms)
  }

  /**
   * Loads `className` and returns the `target()` of its annotation of type A,
   * or "" when the class cannot be loaded or lacks the annotation.
   * Shared by the four public *_analyze methods below (previously four identical copies).
   */
  private def annotationTarget[A <: java.lang.annotation.Annotation](
      classLoader: ClassLoader,
      className: String,
      annotationClass: Class[A],
      target: A => String): String = {
    try {
      val clazz = classLoader.loadClass(className)
      val annotation = clazz.getAnnotation(annotationClass)
      if (annotation != null) target(annotation) else ""
    } catch {
      case _: Exception =>
        LOGGER.warn(s"${className} is not existed...")
        ""
    }
  }

  /** Returns the Edata_Loader target name of `className`, or "" if absent. */
  def loader_analyze(classLoader: ClassLoader, className: String): String =
    annotationTarget(classLoader, className, classOf[Edata_Loader], (a: Edata_Loader) => a.target())

  /** Returns the Edata_Saver target name of `className`, or "" if absent. */
  def saver_analyze(classLoader: ClassLoader, className: String): String =
    annotationTarget(classLoader, className, classOf[Edata_Saver], (a: Edata_Saver) => a.target())

  /** Returns the Edata_Source target name of `className`, or "" if absent. */
  def source_analyze(classLoader: ClassLoader, className: String): String =
    annotationTarget(classLoader, className, classOf[Edata_Source], (a: Edata_Source) => a.target())

  /** Returns the Edata_Sink target name of `className`, or "" if absent. */
  def sink_analyze(classLoader: ClassLoader, className: String): String =
    annotationTarget(classLoader, className, classOf[Edata_Sink], (a: Edata_Sink) => a.target())

}