package com.edata.bigdata.util

import com.edata.bigdata.annotation.{EData_Graph, EData_SId, EData_TId, EData_VId, Edata_Algorithm, Edata_Executor, Edata_Field, Edata_Id, Edata_Object, Edata_Scan}
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.thrift.protocol.TCompactProtocol
import org.json.{JSONArray, JSONObject}

import java.io.File
import java.net.URLClassLoader
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util.control.Breaks.{break, breakable}


class EDataManager {

  /*
   * Shape of the per-bean JSON schema held in `beanSchema` (one entry per bean class):
   * {
   *   "OBJECT":"kafka_offset",
   *   "ID":{"FIELD":"field_A","COLUMN":"column_A","DATATYPE":"string"},
   *   "COLUMNS":[
   *     {"FIELD":"field_A","COLUMN":"column_A","DATATYPE":"string"},
   *     {"FIELD":"field_B","COLUMN":"column_B","DATATYPE":"string"}
   *   ]
   * }
   */

  /** Spark application name used when the shared SparkSession is created. */
  var APPNAME: String = "EDATA"

  /*
   * Supported Spark master URLs (set MASTER before the first session is built):
   *  (1) local               — local mode, single thread, no parallel computation
   *  (2) local[k]            — local mode with k worker threads
   *  (3) local[*]            — local mode, one thread per available CPU core
   *  (4) spark://host:port   — Standalone mode: Spark schedules its own resources;
   *                            machines are split into master (usually one) and workers
   *  (5) mesos://host:port   — Mesos-managed resource scheduling
   *  (6) yarn                — YARN-managed; split into `cluster` and `client`
   *                            by the --deploy-mode parameter:
   *  (6.1) cluster — common production mode; scheduling and computation run on the cluster
   *  (6.2) client  — Driver and ApplicationMaster run locally, tasks run on the cluster
   */
  var MASTER: String = "local[*]"

  /** Extra SparkConf key/value pairs applied when the session is built. */
  var CONFIG: HashMap[String, String] = new HashMap[String, String]()

  /** Shared SparkSession, created lazily on first executor/algorithm request. */
  var SESSION: SparkSession = null

  // Registries discovered by EDataManager.apply: bean class name -> JSON schema,
  // and annotation target name -> implementation class name.
  var beanSchema: mutable.HashMap[String, String] = _
  var executors: mutable.HashMap[String, String] = _
  var algorithms: mutable.HashMap[String, String] = _

  /** Creates a manager pre-populated with the scanned bean/executor/algorithm registries. */
  def this(beanSchema: mutable.HashMap[String, String], executors: mutable.HashMap[String, String], algorithms: mutable.HashMap[String, String]) = {
    this()
    this.beanSchema = beanSchema
    this.executors = executors
    this.algorithms = algorithms
  }

  /**
   * Instantiates the executor registered under `executorType`, wires the shared
   * bean schema and SparkSession into it, and returns it.
   *
   * Terminates the JVM when the type is unknown (non-zero status: this is an
   * error, not a clean shutdown — the original exited with 0).
   */
  def createExecutor(executorType: String): Executor = {
    executors.get(executorType) match {
      case Some(exeClassName) =>
        // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance().
        val executor = Class.forName(exeClassName).getDeclaredConstructor().newInstance().asInstanceOf[Executor]
        executor.beanSchema = beanSchema
        if (SESSION == null) {
          SESSION = createSparkSession()
        }
        executor.SESSION = SESSION
        executor
      case None =>
        EDataManager.LOGGER.warn(s"Executor ${executorType} do not exist")
        System.exit(1)
        null // unreachable
    }
  }

  /**
   * Instantiates the algorithm registered under `algorithmType`, wires the shared
   * SparkSession into it, and returns it.
   *
   * Terminates the JVM when the type is unknown (non-zero status, see above).
   */
  def createAlgorithm(algorithmType: String): Algorithms = {
    algorithms.get(algorithmType) match {
      case Some(algClassName) =>
        val algorithm = Class.forName(algClassName).getDeclaredConstructor().newInstance().asInstanceOf[Algorithms]
        if (SESSION == null) {
          SESSION = createSparkSession()
        }
        algorithm.SESSION = SESSION
        algorithm
      case None =>
        // Fixed typo in original message ("existt").
        EDataManager.LOGGER.warn(s"Algorithm ${algorithmType} do not exist")
        System.exit(1)
        null // unreachable
    }
  }

  /**
   * Builds a SparkSession from APPNAME / MASTER and the entries in CONFIG.
   *
   * The key "spark.serializer" is handled specially: registerKryoClasses both
   * registers TCompactProtocol for Kryo and sets the serializer to KryoSerializer,
   * so the configured value is intentionally not applied verbatim.
   */
  def createSparkSession(): SparkSession = {
    val sparkConf = new SparkConf
    val builder = SparkSession.builder().appName(APPNAME).master(MASTER) //.enableHiveSupport()
    for ((k, v) <- CONFIG) {
      if (k.equals("spark.serializer")) {
        sparkConf.registerKryoClasses(Array[Class[_]](classOf[TCompactProtocol]))
      } else {
        sparkConf.set(k, v)
      }
    }
    builder.config(sparkConf)
    builder.getOrCreate()
  }

  /**
   * Prints the registered executors or algorithms ("name --> class") to stdout.
   *
   * @param helpType "executor" or "algorithm"; anything else prints a usage hint
   *                 (the original threw a MatchError on unknown input).
   */
  def help(helpType: String): Unit = {
    helpType match {
      case "executor" =>
        executors.foreach { case (name, clazz) => println(s"${name} --> ${clazz}") }
      case "algorithm" =>
        algorithms.foreach { case (name, clazz) => println(s"${name} --> ${clazz}") }
      case other =>
        println(s"Unknown help type '${other}'; expected 'executor' or 'algorithm'")
    }
  }
}

object EDataManager {

  @transient lazy val LOGGER = Logger.getLogger(this.getClass)

  /**
   * Scans the bean and executor packages declared by the `Edata_Scan` annotation
   * on `clazz`, registers whichever built-in executors/algorithms are present on
   * the classpath, and returns a fully populated [[EDataManager]].
   */
  def apply[T](clazz: Class[T]): EDataManager = {
    val beanclassSchema = mutable.HashMap[String, String]()
    // NOTE(review): substring(1) drops the leading '/' of the resource URL path,
    // which is only correct for Windows-style "/C:/..." paths — confirm behavior
    // on POSIX systems where this yields a relative path.
    val clazzDir = this.getClass().getClassLoader().getResource("").getPath.substring(1)
    // Defaults used when the entry class carries no Edata_Scan annotation.
    var beanDir = "/usr/local/"
    var executorDir = "/usr/local/"
    breakable {
      for (a <- clazz.getAnnotations) {
        if (a.annotationType == classOf[Edata_Scan]) {
          val scanAnno = a.asInstanceOf[Edata_Scan]
          beanDir = scanAnno.bean()
          executorDir = scanAnno.executor()
          break
        }
      }
    }

    /* Bean classes: collect the JSON schema of every annotated bean. */
    val beanDirAbsolute = clazzDir + beanDir.replaceAll("\\.", "/")
    val beanClassDirFile = new File(beanDirAbsolute)
    val beanClassFiles = beanClassDirFile.listFiles()
    if (beanClassFiles != null && !beanClassFiles.isEmpty) {
      val beanLoader = URLClassLoader.newInstance(Array(beanClassDirFile.toURI.toURL))
      beanClassFiles.foreach(file => {
        val className = beanDir + "." + file.getName.stripSuffix(".class")
        LOGGER.info(s"Loading bean class ${className} to EData Context ")
        /* Graph-type bean (vertex/edge schema). */
        val beanclassGraphJsonStr = beanClassGraphAnalyze(beanLoader, className)
        if (beanclassGraphJsonStr != null && !beanclassGraphJsonStr.equals("{}")) {
          beanclassSchema.put(className, beanclassGraphJsonStr)
        }
        /* Object-type bean (plain table schema). */
        val beanclassObjectJsonStr = beanClassObjectAnalyze(beanLoader, className)
        if (beanclassObjectJsonStr != null && !beanclassObjectJsonStr.equals("{}")) {
          beanclassSchema.put(className, beanclassObjectJsonStr)
        }
      })
    } else {
      LOGGER.warn(s"No loadable scala | java bean exist in ${beanDir}")
    }

    /* Built-in executors: register each one that resolves on the classpath. */
    val executors = new mutable.HashMap[String, String]()
    val exeTypes = Array(
      "com.edata.bigdata.postgre.SparkPostgresExecutor",
      "com.edata.bigdata.mongo.SparkMongoExecutor",
      "com.edata.bigdata.s3a.SparkS3AExecutor",
      "com.edata.bigdata.kafka.SparkKafkaConsumer",
      "com.edata.bigdata.kafka.SparkKafkaProducer",
      "com.edata.bigdata.hdfs.SparkHdfsExecutor",
      "com.edata.bigdata.nebula.SparkNebulaExecutor"
    )
    val classLoader = this.getClass().getClassLoader()
    exeTypes.foreach(et => {
      val executorName = executorAnalyze(classLoader, et)
      if (executorName != null && !executorName.equals("")) {
        LOGGER.info(s"Loading executor class ${et} to EData Context ")
        executors.put(executorName, et)
      }
    })

    /* Built-in algorithms: same probing strategy as executors. */
    val algorithms = new mutable.HashMap[String, String]()
    val algTypes = Array(
      "com.edata.bigdata.algorithm.networks.KCores",
      "com.edata.bigdata.algorithm.networks.AllPairsNodeConnectivity"
    )
    algTypes.foreach(at => {
      val algorithmName = algorithmAnalyze(classLoader, at)
      if (algorithmName != null && !algorithmName.equals("")) {
        LOGGER.info(s"Loading algorithm class ${at} to EData Context ")
        algorithms.put(algorithmName, at)
      }
    })

    /* Custom (user-supplied) executors from the scanned executor package. */
    val executorDirAbsolute = clazzDir + executorDir.replaceAll("\\.", "/")
    val executorClassDirFile = new File(executorDirAbsolute)
    val executorClassFiles = executorClassDirFile.listFiles()
    if (executorClassFiles != null && !executorClassFiles.isEmpty) {
      // BUGFIX: the loader must point at the executor directory; the original
      // built it from beanClassDirFile, so custom executors outside the bean
      // directory could never be loaded.
      val customLoader = URLClassLoader.newInstance(Array(executorClassDirFile.toURI.toURL))
      executorClassFiles.foreach(file => {
        val className = executorDir + "." + file.getName.stripSuffix(".class")
        val executorName = customExecutorAnalyze(customLoader, className)
        if (executorName != null && !executorName.equals("")) {
          LOGGER.info(s"Loading external executor class ${className} to EData Context ")
          executors.put(executorName, className)
        }
      })
    } else {
      LOGGER.warn(s"No loadable scala | java executor exist in ${executorDir}")
    }

    new EDataManager(beanclassSchema, executors, algorithms)
  }

  /**
   * Returns the `target()` name of the `Edata_Executor` annotation on `className`,
   * or "" when the class cannot be loaded or carries no such annotation.
   */
  def executorAnalyze(classLoader: ClassLoader, className: String): String = {
    try {
      val executorAnnotation = classLoader.loadClass(className).getAnnotation(classOf[Edata_Executor])
      if (executorAnnotation != null) executorAnnotation.target() else ""
    } catch {
      case e: Exception =>
        LOGGER.warn(s"Executor ${className} is not existed...")
        ""
    }
  }

  /**
   * Returns the `target()` name of the `Edata_Algorithm` annotation on `className`,
   * or "" when the class cannot be loaded or carries no such annotation.
   */
  def algorithmAnalyze(classLoader: ClassLoader, className: String): String = {
    try {
      val algorithmAnnotation = classLoader.loadClass(className).getAnnotation(classOf[Edata_Algorithm])
      if (algorithmAnnotation != null) algorithmAnnotation.target() else ""
    } catch {
      case e: Exception =>
        // Fixed copy-paste in original message, which said "Executor".
        LOGGER.warn(s"Algorithm ${className} is not existed...")
        ""
    }
  }

  /**
   * Returns the `target()` name of the `Edata_Executor` annotation on a custom
   * executor class, or "" when the class cannot be loaded or is not annotated.
   * Now catches load failures (the original let a single bad file in the scanned
   * directory abort the whole scan), matching the sibling analyzers.
   */
  def customExecutorAnalyze(classLoader: URLClassLoader, className: String): String = {
    try {
      val executorAnnotation = classLoader.loadClass(className).getAnnotation(classOf[Edata_Executor])
      if (executorAnnotation != null) executorAnnotation.target() else ""
    } catch {
      case e: Exception =>
        LOGGER.warn(s"Executor ${className} is not existed...")
        ""
    }
  }

  /**
   * Builds the JSON schema for a graph-type bean (annotated with `EData_Graph`).
   * Returns "{}" when the class carries no graph annotation.
   *
   * Field-level roles (Edata_Id / EData_VId / EData_SId / EData_TId) reuse the
   * column/dtype metadata of the field's `Edata_Field` annotation, so a role is
   * only emitted for fields that also carry `Edata_Field` (the original
   * dereferenced a possibly-null `Edata_Field` in the role branches — NPE).
   */
  def beanClassGraphAnalyze(classLoader: URLClassLoader, className: String): String = {
    val clazz = classLoader.loadClass(className)
    val graphAnnotation = clazz.getAnnotation(classOf[EData_Graph])
    val clazzJsonObj = new JSONObject()
    if (graphAnnotation != null) {
      val objJson = new JSONObject()
      val graphType = graphAnnotation.type_()
      objJson.put("SPACE", graphAnnotation.space())
      objJson.put("TARGET", graphAnnotation.target())
      objJson.put("TYPE", graphType)
      clazzJsonObj.put("GRAPH", objJson)
      val colJsonArr = new JSONArray()
      clazz.getDeclaredFields.foreach(f => {
        val fieldAnnotation = f.getAnnotation(classOf[Edata_Field])
        if (fieldAnnotation != null) {
          // Builds a fresh {FIELD, COLUMN, DATATYPE} triple for this field.
          def fieldJson(): JSONObject = {
            val json = new JSONObject()
            json.put("FIELD", f.getName)
            json.put("COLUMN", fieldAnnotation.target())
            json.put("DATATYPE", fieldAnnotation.dtype())
            json
          }
          colJsonArr.put(fieldJson())
          if (f.getAnnotation(classOf[Edata_Id]) != null) {
            clazzJsonObj.put("ID", fieldJson())
          }
          if (graphType == "VERTEX" && f.getAnnotation(classOf[EData_VId]) != null) {
            clazzJsonObj.put("VID", fieldJson())
          }
          if (graphType == "EDGE") {
            if (f.getAnnotation(classOf[EData_SId]) != null) {
              clazzJsonObj.put("SID", fieldJson())
            }
            if (f.getAnnotation(classOf[EData_TId]) != null) {
              clazzJsonObj.put("TID", fieldJson())
            }
          }
        }
      })
      clazzJsonObj.put("COLUMNS", colJsonArr)
    }
    clazzJsonObj.toString()
  }

  /**
   * Builds the JSON schema for an object-type bean (annotated with `Edata_Object`).
   * Returns "{}" when the class carries no object annotation.
   *
   * As with graph beans, the ID role reuses the field's `Edata_Field` metadata
   * and is therefore only emitted when that annotation is present.
   */
  def beanClassObjectAnalyze(classLoader: URLClassLoader, className: String): String = {
    val clazz = classLoader.loadClass(className)
    val objAnnotation = clazz.getAnnotation(classOf[Edata_Object])
    val clazzJsonObj = new JSONObject()
    if (objAnnotation != null) {
      val objJson = new JSONObject()
      objJson.put("TARGET", objAnnotation.target())
      clazzJsonObj.put("OBJECT", objJson)
      val colJsonArr = new JSONArray()
      clazz.getDeclaredFields().foreach(f => {
        val fieldAnnotation = f.getAnnotation(classOf[Edata_Field])
        if (fieldAnnotation != null) {
          // Builds a fresh {FIELD, COLUMN, DATATYPE} triple for this field.
          def fieldJson(): JSONObject = {
            val json = new JSONObject()
            json.put("FIELD", f.getName)
            json.put("COLUMN", fieldAnnotation.target())
            json.put("DATATYPE", fieldAnnotation.dtype())
            json
          }
          colJsonArr.put(fieldJson())
          if (f.getAnnotation(classOf[Edata_Id]) != null) {
            clazzJsonObj.put("ID", fieldJson())
          }
        }
      })
      clazzJsonObj.put("COLUMNS", colJsonArr)
    }
    clazzJsonObj.toString()
  }

}