package org.zjt.spark.traffic

import kafka.serializer.{Decoder, StringDecoder}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.KafkaUtils
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}


/**
  * Spark Streaming job: consumes traffic messages from Kafka, aggregates
  * speed per car over a sliding window, and stores the results in Redis.
  *
  * Note on `foreach` vs `foreachPartition`:
  *   - `foreach` ships the supplied function to the executors, so every
  *     object the function captures must be serializable.
  *   - `foreachPartition` hands the whole partition iterator to the function,
  *     so non-serializable resources (e.g. connection pools) can be created
  *     locally inside it and need not be serialized.
  **/
object HandlerTrafficMeg {

  /**
    * Entry point: reads JSON traffic records from Kafka, computes a windowed
    * average speed per car, and writes the results to a Redis sorted set.
    *
    * (Explicit `main` instead of the `App` trait to avoid its
    * initialization-order pitfalls.)
    */
  def main(args: Array[String]): Unit = {
    val config = new SparkConf().setMaster("local[2]").setAppName("HandlerTrafficMeg")
    val ssc = new StreamingContext(config, Seconds(2))
    // Checkpointing is mandatory for reduceByKeyAndWindow with an inverse function.
    ssc.checkpoint("./checkpoint")

    // Shared Jackson mapper; unknown JSON fields are ignored so producers can
    // add fields without breaking this job.
    val mapper = new ObjectMapper()
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)

    // Expected message shape:
    // {"camera_id":"123456714106","road_id":"01","car_id":"京A58F37","event_time":"2055-12-31 08:30:30","speed":"58"}
    val result = KafkaUtils
      .createDirectStream[String, String, StringDecoder, StringDecoder](
        ssc,
        Map("metadata.broker.list" -> "centos:9092"),
        Set("test"))
      .map(_._2)
      .transform { rdd => rdd.map(msg => mapper.readValue(msg, classOf[MsgEntitty])) }
      .map(entity => (entity.car_id, entity))
      .reduceByKeyAndWindow(
        // Combine: accumulate speed sum and record count.
        (a: MsgEntitty, b: MsgEntitty) => {
          // BUG FIX: the summed speed was previously computed but never
          // assigned back, so the window never actually accumulated speeds.
          a.speed = (a.speed.toInt + b.speed.toInt).toString
          // BUG FIX: was `a.cnt += 1`, which undercounts when two partial
          // aggregates (each with cnt > 1) are merged; must add b's count.
          a.cnt += b.cnt
          a
        },
        // Inverse combine: remove the contribution of records leaving the window.
        (a: MsgEntitty, b: MsgEntitty) => {
          a.speed = (a.speed.toInt - b.speed.toInt).toString
          a.cnt -= b.cnt
          a
        },
        Seconds(30), Seconds(10))
      // Guard the average below against division by zero.
      .filter(_._2.cnt != 0)
      // (car_id, average speed, speed sum, record count)
      .map(t => (t._1, t._2.speed.toInt / t._2.cnt.toDouble, t._2.speed, t._2.cnt))
      .persist()

    // BUG FIX: `println(result.count())` only printed the DStream's toString
    // once at graph-construction time; print() emits the count of each batch.
    result.count().print()

    val zsetName = "zset".getBytes()
    result.foreachRDD { rdd =>
      // foreachPartition (rather than foreach) so the non-serializable Jedis
      // connection is obtained on the executor, once per partition, instead of
      // being captured in a closure serialized from the driver.
      rdd.foreachPartition { records =>
        val jedis = RedisClient.pool.getResource
        try {
          records.foreach { msg =>
            val payload = msg.toString().getBytes()
            // Score the sorted-set member by the average speed (tuple._2).
            jedis.zadd(zsetName, msg._2, payload)
            println("zset的元素个数:%s".format(jedis.zcard(zsetName)))
          }
        } finally {
          // Always return the connection to the pool, even if zadd throws.
          jedis.close()
        }
      }
    }

    ssc.start()
    // Blocks until the context is stopped externally; the previous trailing
    // ssc.stop() after this call was unreachable in practice and was removed.
    ssc.awaitTermination()
  }
}


/**
  * One traffic-camera observation, deserialized from a Kafka JSON message by
  * Jackson. All fields arrive as strings (including `speed`); `cnt` is not
  * part of the JSON — it starts at 1 and is used by the windowed reduce to
  * track how many records were merged into this accumulator.
  *
  * Serializable (with a fixed serialVersionUID) because instances travel
  * through Spark closures and checkpointed window state.
  */
@SerialVersionUID(1)
class MsgEntitty extends Serializable {

  // Key used for the per-car aggregation.
  @JsonProperty("car_id") var car_id: String = ""

  @JsonProperty("camera_id") var camera_id: String = ""

  @JsonProperty("road_id") var road_id: String = ""

  @JsonProperty("event_time") var event_time: String = ""

  // Numeric value encoded as a string in the source JSON.
  @JsonProperty("speed") var speed: String = ""

  // Merge counter, not present in the JSON payload.
  var cnt: Int = 1

}