package com.cmsr.hdpf.etl.task

import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.serializer.SerializeConfig
import com.cmsr.hdpf.etl.BaseTask
import com.sun.xml.internal.bind.v2.TODO

import scala.collection.{SortedSet, mutable}
import scala.util.parsing.json.JSONObject
//import com.cmsr.hdpf.etl.task.parse.GTRsmParser
import com.cmsr.hdpf.etl.watermark.PeriodicAssigner
import com.cmsr.hdpf.model.{RsmDataStream, SuzhouRoadMap}
import com.cmsr.hdpf.sink.kafka.KafkaSink
import com.cmsr.hdpf.sink.hdfs.HdfsSink
import com.cmsr.hdpf.source.KafkaSource
import com.cmsr.hdpf.util.ConfigHelper
import org.apache.flink.api.common.functions.FlatMapFunction
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.api.scala.{DataStream, _}
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
//import org.apache.flink.streaming.connectors.fs.StringWriter
import org.apache.flink.util.Collector
import org.slf4j.LoggerFactory
//import org.apache.hadoop.fs.FileSystem
//import org.apache.hadoop.fs.Path
//import org.apache.hadoop.conf.Configuration
//import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink

import java.text.SimpleDateFormat
import java.util.Date


/**
 * Parses raw traffic-participant data from the Gaotie Xincheng (high-speed-rail
 * new town) feed.
 */
case class GTParseRawJsonTask() extends BaseTask {

    // Result stream: parsed, windowed records serialized back to JSON strings.
    // Assigned inside parseRawJson() and consumed by saveToSink().
    var flat_rsm_Stream: DataStream[String] = null

    // Logger named after this class. (Was "ParseRawJsonTask", which belongs to a
    // different task and made log lines impossible to attribute correctly.)
    val baseTaskLogger = LoggerFactory.getLogger("GTParseRawJsonTask")

    // NOTE(review): incremented exactly once, during job-graph construction —
    // it does NOT count streamed records. Kept only for interface compatibility.
    var data_cnt = 0

    /**
     * Task entry point; delegates to [[parseRawJson]].
     *
     * @param sourceType source selector, e.g. "kafka"
     * @param sinkType   sink selector; may contain "kafka" and/or "hdfs"
     * @param debug      unused by this task
     * @param windowSize unused by this task; the window length is hard-coded below
     */
    override def processTask(sourceType: String, sinkType: String, debug: String, windowSize: Int): Unit = {
        parseRawJson(sourceType, sinkType)
    }

    /**
     * Builds and executes the Flink pipeline:
     * source -> parse JSON -> keyBy(id) -> tumbling window -> merge -> JSON string -> sink(s).
     *
     * Nothing is processed until contextEnv.execute() is called at the end;
     * everything before that only constructs the job graph.
     */
    def parseRawJson(sourceType: String, sinkType: String): Unit = {
        // Obtain the execution environment and the raw source stream.
        val (contextEnv: StreamExecutionEnvironment, stream: DataStream[String]) = initDataSource(sourceType)
        println("默认并行数：", stream.parallelism, contextEnv.getParallelism)

        // Parse each raw JSON string into an RsmDataStream record.
        // NOTE(review): assignTimestampsAndWatermarks with a periodic assigner is
        // a deprecated API — TODO migrate to WatermarkStrategy.
        val rsm_flat = stream.flatMap(new GTRsmParseJsonFlatMap)
                .assignTimestampsAndWatermarks(new PeriodicAssigner)
        println("end of mapping")
        println("rsm_flat: " + rsm_flat.parallelism)
        println("data cnt: ", data_cnt)
        data_cnt += 1

        // Aggregate per participant id inside a processing-time tumbling window.
        // TODO: make the 1500 s window length configurable.
        val rsm_flat_delta = rsm_flat
                .keyBy(_.id)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(1500)))
                .process(new GTRsmDeltaSpeedWindowFunction)
        println("rsm_flat_delta: " + rsm_flat_delta.parallelism)

        // Serialize the aggregated records back to JSON strings.
        flat_rsm_Stream = rsm_flat_delta
                .map(x => x.toJsonString())
        println("process result: ", flat_rsm_Stream.print())

        saveToSink(sinkType) // wires the sink(s) into the graph; runs nothing yet
        contextEnv.execute("->[kafka,gaotie]->PreTask->ParseRawJsonTask") // starts the actual streaming job
    }

    /**
     * Creates the execution environment (checkpointing every 60 s) and, when
     * sourceType contains "kafka", attaches the Kafka consumer source.
     * Returns (env, stream); stream stays null for unsupported source types.
     */
    private def initDataSource(sourceType: String) = {
        val contextEnv = StreamExecutionEnvironment.getExecutionEnvironment
        var stream: DataStream[String] = null

        contextEnv.enableCheckpointing(60000) // checkpoint interval: 60 s

        if (sourceType.toLowerCase.contains("kafka")) {
            stream = contextEnv
                    .addSource(KafkaSource.GetKafkaConsumer(data = ConfigHelper.parse_source_kafka_topic_raw_traffic, group = ConfigHelper.parse_source_kafka_topic_raw_traffic))
                    .name("traffic_flow_hdfs")
            System.out.println("stream initialized***************************************")
        }

        (contextEnv, stream)
    }

    /**
     * Attaches sinks to flat_rsm_Stream according to sinkType; both a Kafka
     * and an HDFS sink are attached when sinkType mentions both.
     */
    private def saveToSink(sinkType: String): Unit = {

        println("sink to kafka:", sinkType.toLowerCase.contains("kafka"))
        if (sinkType.toLowerCase.contains("kafka")) {
            flat_rsm_Stream.addSink(KafkaSink.GetKafkaProducer(ConfigHelper.parse_sink_kafka_topic_rsm))
                    .name(ConfigHelper.parse_sink_kafka_topic_rsm + "(kafka)")
        }

        if (sinkType.toLowerCase.contains("hdfs")) {
            // Running locally needs extra Hadoop dependencies; intended for cluster runs.
            println("sink: hdfs")
            flat_rsm_Stream.addSink(HdfsSink.GetDataLakeSink("/tmp/bjtu/bsm3", ".txt"))
                    .name(ConfigHelper.parse_sink_kafka_topic_rsm + "(hdfs)")
        }
    }
}


/**
 * Parses one raw message of the form "&lt;seconds&gt;_&lt;millis&gt;_...{json}" into an
 * RsmDataStream record. The prefix before the first '{' carries the timestamp;
 * the JSON payload carries the participant id and its path-history crumb points.
 *
 * Note: each Kafka partition is ordered, but global order across partitions is
 * not guaranteed, so a time window may not see all of a participant's records.
 */
private class GTRsmParseJsonFlatMap extends FlatMapFunction[String, RsmDataStream] {

  // Per-subtask count of processed messages (diagnostic only).
  var message_cnt: Int = 0

  /** Formats an epoch-millisecond string as "yyyy-MM-dd HH:mm:ss". */
  def tranTimeToString(tm: String): String = {
    val fm = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    fm.format(new Date(tm.toLong))
  }

  /**
   * Extracts the timestamp prefix and the trajectory points from one message
   * and emits a single RsmDataStream.
   *
   * @param json raw message: "&lt;seconds&gt;_&lt;millis&gt;_..." followed by a JSON object
   * @param out  collector receiving the parsed record
   */
  override def flatMap(json: String, out: Collector[RsmDataStream]): Unit = {
    message_cnt += 1

    // Split the message into the timestamp prefix and the JSON body.
    val begin_idx = json.indexOf('{')
    val msg_timestamp_str = json.substring(0, begin_idx)
    val point_idx = msg_timestamp_str.indexOf('_')
    val second_timestamp = msg_timestamp_str.substring(0, point_idx)
    // Only the second-resolution part of the prefix is used; the millisecond
    // part (after the first '_') is deliberately ignored.
    val total_timestamp = second_timestamp

    // Parse the JSON payload and pull out the participant id.
    val js_str = json.substring(begin_idx)
    val jObject = JSON.parseObject(js_str)
    val user_id = jObject.getString("id")

    // Flatten the path-history crumb points into "lon,lat,ts;lon,lat,ts;..." form.
    var tr_list_str = ""
    val crumbdata = jObject.getJSONObject("safetyExt").getJSONObject("pathHistory").getJSONArray("crumbData")
    val tr_len = crumbdata.size()
    for (i <- 0 until tr_len) {
      val str = crumbdata.get(i).toString
      val loc_json = JSON.parseObject(str).getJSONObject("llvOffset").getJSONObject("offsetLL")
      try {
        // Only these offset encodings are understood; unknown keys are skipped.
        val supported_key: List[String] = List("position_LatLon", "position_LL1", "position_LL2", "position_LL3", "position_LL4", "position_LL5")
        for (sk <- supported_key) {
          if (loc_json.containsKey(sk)) {
            val lonlat = loc_json.getJSONObject(sk)
            val lon = lonlat.getString("lon")
            val lat = lonlat.getString("lat")
            tr_list_str = tr_list_str + "" + lon + "," + lat + "," + total_timestamp
          }
        }
        if (i < tr_len - 1) {
          tr_list_str = tr_list_str + ";"
        }
      } catch {
        case _: Exception => println("the message is not in supported format")
      }
    }

    out.collect(new RsmDataStream(timestamp = second_timestamp.toLong, id = user_id, pt_list = tr_list_str))
  }
}

/**
 * Pass-through ProcessFunction originally intended to throttle the stream by
 * sleeping between elements; the sleep is disabled, so every record is simply
 * forwarded unchanged.
 */
class SleepProcessFunction extends ProcessFunction[RsmDataStream, RsmDataStream] {
    override def processElement(element: RsmDataStream,
                                context: ProcessFunction[RsmDataStream, RsmDataStream]#Context,
                                collector: Collector[RsmDataStream]): Unit =
        collector.collect(element) // no delay: the throttling sleep remains disabled
}

/**
 * Window function for per-participant aggregation. (Header previously said
 * "compute acceleration"; the implementation below merges trajectory points —
 * NOTE(review): confirm whether delta-speed computation is still planned.)
 */
// TODO: add trajectory stitching and aggregation logic
/**
 * Merges the trajectory fragments of one traffic participant collected within
 * a tumbling window: fragments are sorted by timestamp, their point lists are
 * parsed and concatenated (duplicates dropped by a LinkedHashSet, insertion
 * order preserved), and a single merged record is emitted per key per window.
 */
private class GTRsmDeltaSpeedWindowFunction extends ProcessWindowFunction[RsmDataStream, RsmDataStream, String, TimeWindow] {

  /**
   * Parses a "lon,lat,ts;lon,lat,ts;..." string into an insertion-ordered set
   * of (lon, lat, ts) tuples. Raw coordinates are in 1e-7-degree units and are
   * converted to plain degrees here.
   */
  def extract_pts(pt_list: String): scala.collection.mutable.LinkedHashSet[Tuple3[String, String, String]] = {
    val pts = scala.collection.mutable.LinkedHashSet[Tuple3[String, String, String]]()
    println("ptlist", pt_list)
    val all_pts = pt_list.split(";").toList
    println("extract points")
    println("all_pts", all_pts)
    for (pt_str <- all_pts) {
      val fields = pt_str.split(",").toList
      // Scale raw 1e-7-degree integers to degrees.
      val lon = (fields.head.toDouble * 1e-7).toString
      val lat = (fields(1).toDouble * 1e-7).toString
      pts += Tuple3(lon, lat, fields(2))
    }
    pts
  }

  /**
   * Emits one record per (key, window) carrying the union of all trajectory
   * points seen in the window, appended in timestamp order.
   */
  override def process(key: String,
                       ctx: Context,
                       input: Iterable[RsmDataStream],
                       out: Collector[RsmDataStream]): Unit = {
    println("process window:" + key, ctx.window, ctx.currentWatermark, ctx.window.maxTimestamp(), input.size, Thread.currentThread().getId)
    println("input:" + input)

    // Sort fragments by timestamp so points are appended in temporal order.
    val sorted = input.toList.sortBy(_.timestamp)
    println("trs:", sorted)

    // Guard against an empty window: the previous version dereferenced a null
    // record holder in that case, which would crash the job.
    if (sorted.nonEmpty) {
      // Reuse the earliest record as the carrier; only its pt_list is rewritten.
      val merged = sorted.head
      var all_pts = scala.collection.mutable.LinkedHashSet[(String, String, String)]()
      for (tr <- sorted) {
        all_pts = all_pts ++ extract_pts(tr.pt_list) // set union keeps first occurrence
        println("id", tr.id, "all_pts:size", all_pts.size, "all_pts", all_pts)
      }
      // Stringify only after every fragment has been merged.
      merged.pt_list = all_pts.toString()
      println(merged)
      out.collect(merged)
    }
  }
}