package com.shujia.card

import java.text.SimpleDateFormat
import java.util.Date

import com.alibaba.fastjson.JSON
import com.shujia.util.SparkStreamTool
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import redis.clients.jedis.Jedis
import com.shujia.util.CarUtil
object RealTimeCardDayFlow extends SparkStreamTool {

  /**
    * Consumes car records from Kafka via Spark Streaming and maintains a
    * running total of vehicle flow per checkpoint (card) per day, persisting
    * each total to Redis under the key "RealTimeCardDayFlow:&lt;card&gt;:&lt;yyyyMMdd&gt;".
    *
    * @param args command-line arguments (unused here; consumed by SparkStreamTool)
    */
  override def run(args: Array[String]): Unit = {

    // Load the car stream from Kafka; the second argument doubles as the consumer group / job name.
    val cars: DStream[CarUtil.Car] = CarUtil.loadKafkaCar(ssc, "RealTimeCardDayFlow")

    /**
      * Key every record by "card:yyyyMMdd" so counts accumulate per checkpoint per day.
      *
      * mapPartitions is used so the SimpleDateFormat (which is neither cheap to
      * construct nor thread-safe) is created once per partition rather than once
      * per record, as the original per-record `map` did.
      */
    val kvDS: DStream[(String, Int)] = cars.mapPartitions(records => {
      val dayFormat = new SimpleDateFormat("yyyyMMdd")
      records.map(car => {
        // car.time is seconds since the epoch — convert to millis for java.util.Date.
        val day: String = dayFormat.format(new Date(car.time * 1000))
        (car.card + ":" + day, 1)
      })
    })

    // Running total per key: `seq` holds this batch's values for the key,
    // `state` is the previously accumulated count (None on first sight).
    // NOTE(review): updateStateByKey requires a checkpoint directory — assumed
    // to be configured by SparkStreamTool; confirm.
    val countDS: DStream[(String, Int)] =
      kvDS.updateStateByKey((seq: Seq[Int], state: Option[Int]) => Some(seq.sum + state.getOrElse(0)))

    // Persist the running totals to Redis.
    countDS.foreachRDD(rdd => {
      rdd.foreachPartition(iter => {
        // 1. Open a Redis connection — one per partition, never per record.
        val jedis = new Jedis("master", 6379)
        try {
          iter.foreach {
            case (cardAndDay: String, num: Int) =>
              // Key: checkpoint id + day; value: cumulative vehicle count.
              jedis.set("RealTimeCardDayFlow:" + cardAndDay, num.toString)
          }
        } finally {
          // Always release the connection, even if a write above throws
          // (the original leaked the connection on failure).
          jedis.close()
        }
      })
    })
  }
}
