package com.dtxy.data

/**
 * Created by juice on 2015/8/14.
 */

import java.util
import java.util.Map.Entry
import java.util.regex.Pattern
import java.io.Serializable
import javax.script.{ScriptEngine, Invocable, ScriptEngineManager}

import com.dtxy.xdb.entity. Point
import org.apache.spark.rdd.RDD
import org.apache.thrift.protocol.TCompactProtocol
import org.apache.thrift.transport.TFramedTransport
import org.apache.thrift.transport.TSocket

import org.apache.spark.SparkContext
import org.apache.spark.SparkConf

import scala.collection.JavaConversions._

/**
 * Driver for distributed expression evaluation.
 *
 * Expressions reference xdb data points by name, e.g. "{DT.HN.Tag12}".
 * Workflow: load expressions into an RDD, collect the distinct points they
 * mention, fetch the points' current values from xdb over Thrift, then
 * evaluate every expression on the Spark cluster.
 */
object Compute {

  // Sample expression to evaluate; in real use these are read from a database.
  val expstr=Array("if({DT.HN.Tag12}>=50) var a={DT.HN.Tag19}+Func_1({DT.HN.Tag89})/3600;return a;")
  // xdb connection settings.
  val xdbAddress = XdbHost.getAddress("xdbAddress")
  val port = XdbHost.getPort("port")
  val timeout = XdbHost.getTimeout("timeout")
  val master = XdbHost.getMaster("sparkMaster")

  // Thrift frames have a size limit; empirically about 320k points fit in a
  // single request, so real-data reads are batched at this size.
  val MaxSizeThrift=320000
  val taskNum = 24
  //val executorMemory="6g"
  //val hdfsPath="hdfs://h86:9000/input/Exp30w.txt"

  // Shared Thrift connection to xdb. Callers must close() to release the socket.
  val transport = new TFramedTransport(new TSocket(xdbAddress, port, timeout))
  val protocol = new TCompactProtocol(transport)
  val xdbClient = new XdbThrift.Client(protocol)

  /**
   * Runs the full pipeline against expressions stored in an HDFS text file.
   *
   * @param hdfsPath       HDFS path of the expression file, one expression per line
   * @param taskNum        number of RDD partitions to use
   * @param executorMemory Spark executor memory setting, e.g. "6g"
   * @return human-readable summary (record count and elapsed seconds)
   */
  def run(hdfsPath:String,taskNum:Int,executorMemory:String): String =
  {
    // Required when running on a Windows 7 development machine.
    System.setProperty("hadoop.home.dir" , "E:\\Program Files\\hadoop-2.7.0" )
    //val sparkConf = new SparkConf().setAppName("COMPE").setMaster("local").set("spark.executor.memory", executorMemory)
    val sparkConf = new SparkConf().setAppName("COMPE").setMaster(master).set("spark.executor.memory", executorMemory)
    // setJars is not needed when the job is deployed on the server side.
//    .setJars(List("D:\\compe\\target\\bigdata-1.0-SNAPSHOT.jar","E:\\maven_repo\\com\\dtxy\\xdb-common\\0.0.1-SNAPSHOT\\xdb-common-0.0.1-SNAPSHOT.jar","E:\\maven_repo\\org\\apache\\thrift\\libthrift\\0.9.2\\libthrift-0.9.2.jar"))

    val sc = new SparkContext(sparkConf)

    // Load expressions as an RDD.
    val exps = sc.textFile(hdfsPath,taskNum)
    val sisCal = new SISCal_2()
    // Collect the distinct points referenced by the expressions.
    val points = preRun(sisCal, exps)
    // Fetch current values and evaluate.
    startRun(sc, points, exps, sisCal)
  }

  /**
   * Runs the pipeline against expressions stored in an HDFS text file, with
   * arbitrary extra Spark configuration supplied via `paramsMap`.
   */
  def run(hdfsPath:String, taskNum:Int, paramsMap:util.Map[String, String]): String =
  {
    val sparkContext = init(paramsMap) // build a configured SparkContext

    val exps = sparkContext.textFile(hdfsPath,taskNum) // expressions as an RDD
    val sisCal = new SISCal_2()

    val points = preRun(sisCal, exps) // distinct points referenced

    startRun(sparkContext, points, exps, sisCal) // fetch values and evaluate
  }

  /**
   * Runs the pipeline against an in-memory list of expression strings, with
   * arbitrary extra Spark configuration supplied via `paramsMap`.
   */
  def run(jsList:util.List[String], taskNum:Int, paramsMap:util.Map[String, String]): String =
  {
    val sparkContext = init(paramsMap) // build a configured SparkContext

    val exps = sparkContext.parallelize(jsList, taskNum) // expressions as an RDD
    val sisCal = new SISCal_2()

    val points = preRun(sisCal, exps) // distinct points referenced

    startRun(sparkContext, points, exps, sisCal) // fetch values and evaluate
  }

  /**
   * Builds a SparkContext, applying every entry of `paramsMap` (may be null)
   * as an extra Spark configuration property.
   */
  def init(paramsMap:util.Map[String, String]): SparkContext =
  {
    //val sparkConf = new SparkConf().setAppName("COMPE").setMaster("local")
    val sparkConf = new SparkConf().setAppName("COMPE").setMaster(master)
    // setJars is not needed when the job is deployed on the server side.
      .setJars(List("D:\\compe\\target\\bigdata-1.0-SNAPSHOT.jar","E:\\maven_repo\\com\\dtxy\\xdb-common\\0.0.1-SNAPSHOT\\xdb-common-0.0.1-SNAPSHOT.jar","E:\\maven_repo\\org\\apache\\thrift\\libthrift\\0.9.2\\libthrift-0.9.2.jar"))

    if(paramsMap != null && !paramsMap.isEmpty)
    {
      for(entry:Entry[String, String] <- paramsMap.entrySet())
      {
        sparkConf.set(entry.getKey.toString, entry.getValue.toString)
      }
    }

    new SparkContext(sparkConf)
  }

  /**
   * Collects the distinct points referenced by the expressions in `exps`,
   * logging how long the collection took.
   */
  def preRun(sisCal:SISCal_2, exps:RDD[String]): List[Point] =
  {
    val beginTime = System.currentTimeMillis()
    val points = sisCal.getPoints(exps)
    println("收集数据耗时%s 秒".format((System.currentTimeMillis() - beginTime) / 1000))
    points
  }

  /**
   * Fetches real-time values for `points` from xdb, evaluates all expressions
   * on the cluster, stops the SparkContext and returns a summary string.
   */
  def startRun(sc: SparkContext, points: List[Point], exps: RDD[String], sisCal: SISCal_2): String =
  {
    val beginTime = System.currentTimeMillis()

    val beginTimeGetData = System.currentTimeMillis()
    transport.open()
    // Close the transport even if the read fails, so the socket is not leaked.
    val realPoints =
      try getRealDatas(points)
      finally transport.close()
    println("从xdb读实时数据耗时%s 秒".format((System.currentTimeMillis() - beginTimeGetData) / 1000))

    // Evaluate every expression against the fetched values.
    val beginTimeComp = System.currentTimeMillis()
    val result = sisCal.comp(exps, realPoints)
    println("计算耗时%s 秒".format((System.currentTimeMillis() - beginTimeComp) / 1000))
    sc.stop()
    val endTime = System.currentTimeMillis()
    val tip="处理%s条，总共耗时：%s 秒".format(result, (endTime - beginTime) / 1000)
    println(tip)
    tip
  }

  /**
   * Reads real-time values for `points` from xdb in batches of at most
   * `MaxSizeThrift`, because the Thrift transport has a frame-size limit.
   * An empty input performs no remote call and returns an empty map.
   *
   * @param points points whose current values are needed
   * @return map from point name to Point carrying its current value
   */
  def getRealDatas(points:List[Point]): Map[String,Point] =
  {
    var resultMap:Map[String,Point]=Map()
    var batchNo=0
    // grouped() replaces the original while loop, which re-evaluated
    // List.length (O(n)) on every iteration — accidentally quadratic.
    for (batch <- points.grouped(MaxSizeThrift)) {
      resultMap=resultMap++xdbClient.getRealPointsWithResulMap(batch)
      batchNo=batchNo+1
      println("第%s次读取xdb".format(batchNo))
    }
    resultMap
  }

  def main(args: Array[String]): Unit = {
    transport.open()
    try {
      //    run("hdfs://h86:9000/input/Exp5w.txt",24,"6g")
      /*val map:util.Map[String, String] = new util.HashMap[String, String]()
      map.put(computeConstants.SPARK_EXECUTOR_MEMORY_KEY, "512m")
      map.put(computeConstants.SPARK_SERIALIZER_KEY, computeConstants.SPARK_SERIALIZER_VALUE)
  //    map.put("spark.task.cpus", "9")
      map.put("spark.cores.max","8")

      val exp1 = "if({DT.HN.Tag010245}>={DT.HN.Tag039949}) var a={DT.HN.Tag013897}%Func_1({DT.HN.Tag027735})/3600;return a;"
      val exp2 = "if({DT.HN.Tag043567}>={DT.HN.Tag017030}) var a={DT.HN.Tag033858}%Func_1({DT.HN.Tag012560})/3600;return a;"
      val exp3 = "if({DT.HN.Tag010770}>={DT.HN.Tag014910}) var a={DT.HN.Tag044849}%Func_1({DT.HN.Tag022740})-3600;return a;"
      val expList: util.ArrayList[String] = new util.ArrayList[String]()
      expList.add(exp1)
      expList.add(exp2)
      expList.add(exp3)

      run(expList, 24, map)*/
      xdbClient.test("haha")
    } finally {
      // Release the socket even if the remote call throws.
      transport.close()
    }
  }

}

/**
 * Serializable helper shipped to Spark executors: extracts the point names
 * referenced by expression strings, and evaluates the expressions with a
 * JVM JavaScript engine after substituting in the points' current values.
 */
class SISCal_2 extends Serializable
{
  // Regex capturing a point reference: "{DT.HN.Tag12}" captures "DT.HN.Tag12".
  val P=Pattern.compile("\\{(.*?)\\}")

  /**
   * Collects the distinct set of points referenced by any expression in `rdd`.
   * A reference "{dir.name}" is split at its LAST dot into dir and name.
   *
   * @param rdd expression strings, one per record
   * @return distinct points, collected to the driver
   */
  def getPoints (rdd: RDD[String]): List[Point] =
  {
    val points=rdd.flatMap {
      exp =>
        // Extract every point reference in this expression.
        var found = List[Point]()
        val matcher = P.matcher(exp)
        while (matcher.find) {
          val full = matcher.group(1)
          val dot = full.lastIndexOf('.')
          val p = new Point()
          p.setDir(full.substring(0, dot))
          p.setName(full.substring(dot + 1))
          found = p :: found
        }
        found
      }.distinct().collect()
    points.toList
  }

  /**
   * Evaluates every expression in `rdd` after replacing each point reference
   * with its current value, and returns the number of records processed.
   *
   * @param rdd        expression strings
   * @param realPoints point name -> Point carrying its current value
   * @return count of evaluated expressions
   */
  def comp(rdd: RDD[String],realPoints:Map[String,Point]): Long =
  {
    val result=rdd.map{
      exp=>
        // Substitute point values into the expression.
        val exp2=getExp(exp,realPoints)
        // Wrap the expression in a JS function and invoke it. The engine is
        // obtained per thread (see ScriptEngineSingleton): several Spark tasks
        // run concurrently in one executor JVM, and the JS engine is not
        // guaranteed thread-safe, so sharing one engine was a data race.
        ScriptEngineSingleton.getEngine().eval(String.format("function f(){%s}", exp2))
        ScriptEngineSingleton.getInvocable().invokeFunction("f")
    }.count()

    result
  }

  /**
   * Returns `exp` with every "{point}" reference replaced by that point's
   * current value from `realPoints`. Throws NoSuchElementException if a
   * referenced point is missing from the map (same as the original behavior).
   */
  private def getExp(exp:String,realPoints:Map[String,Point]): String =
  {
    var reExp=exp
    // The matcher iterates over the ORIGINAL string; replace() works on the
    // accumulated result by literal text, so every occurrence is substituted.
    val m=P.matcher(exp)
    while (m.find){
      reExp=reExp.replace(m.group(),realPoints(m.group(1)).getValue.toString)
    }
    reExp
  }

  /** Supplies one ScriptEngine per thread, pre-loaded with Func_1. */
  object ScriptEngineSingleton
  {
    // ThreadLocal because ScriptEngine implementations are not guaranteed
    // safe for concurrent eval/invoke, and Spark runs multiple task threads
    // per executor JVM. @transient + lazy: the engine is not serializable and
    // is re-created on first access after deserialization.
    @transient private lazy val engines = new ThreadLocal[ScriptEngine] {
      override def initialValue(): ScriptEngine = {
        val engine = new ScriptEngineManager().getEngineByName("javascript")
        // User-defined function available to all expressions.
        engine.eval("function Func_1(obj){return obj;}")
        engine
      }
    }

    def getEngine(): ScriptEngine = {
      engines.get()
    }
    def getInvocable(): Invocable = {
      engines.get().asInstanceOf[Invocable]
    }
  }

}
