package com.shujia.spark.stream

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

import java.lang
import java.sql.{Connection, DriverManager, PreparedStatement}
import java.text.SimpleDateFormat
import java.util.Date

object Demo7Car {
  def main(args: Array[String]): Unit = {
    // Build the Spark environment (local mode, 2 threads)
    val conf = new SparkConf()
    conf.setAppName("state")
    conf.setMaster("local[2]")
    val sc = new SparkContext(conf)

    // One micro-batch every 5 seconds
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Checkpoint directory required by the windowed reduce below
    ssc.checkpoint("data/checkpoint")

    // Read the raw data stream from a socket
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    // Parse each JSON line into (checkpoint id, (speed, 1)).
    // Pairing speed with a count of 1 lets one reduce accumulate both the
    // speed total and the traffic flow.
    val cardAndSpeedDS: DStream[(Long, (Double, Int))] = linesDS.map(line => {
      // Convert the JSON string into a JSON object
      val jsonObj: JSONObject = JSON.parseObject(line)
      // NOTE(review): getLong/getDouble return null (NPE on unboxing) when a
      // key is absent — assumes every record carries "card" and "speed";
      // confirm with the upstream producer.
      val card: Long = jsonObj.getLong("card")
      val speed: Double = jsonObj.getDouble("speed")
      (card, (speed, 1))
    })

    /**
     * Checkpoint congestion statistics:
     * every 10 seconds (slide), compute each checkpoint's traffic flow and
     * average speed over the last 20 seconds (window).
     */
    // Sum total speed and total flow per checkpoint over the window
    val sumSpeedAndFlow: DStream[(Long, (Double, Int))] = cardAndSpeedDS
      .reduceByKeyAndWindow(
        (x, y) => (x._1 + y._1, x._2 + y._2),
        windowDuration = Durations.seconds(20),
        slideDuration = Durations.seconds(10)
      )

    // Derive the average speed and attach the computation timestamp
    val resultDS: DStream[(Long, String, Int, Double)] = sumSpeedAndFlow
      .map {
        case (card: Long, (speed: Double, flow: Int)) =>
          val avgSpeed: Double = speed / flow

          // Current computation time; SimpleDateFormat is not thread-safe,
          // so it is created inside the closure and never shared.
          val date = new Date()
          val format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
          val comDate: String = format.format(date)

          (card, comDate, flow, avgSpeed)
      }

    /**
     * Persist the statistics to MySQL.
     */
    resultDS.foreachRDD(rdd => {
      /**
       * foreachPartition: iterate one partition at a time so that one JDBC
       * connection serves the whole partition instead of one per record.
       * iter: all records of the partition. Runs on the executors.
       */
      rdd.foreachPartition(iter => {
        println("创建数据库连接")
        val start: Long = System.currentTimeMillis()
        // 1. Load the JDBC driver
        Class.forName("com.mysql.jdbc.Driver")
        // 2. Open the database connection
        val con: Connection = DriverManager.getConnection("jdbc:mysql://master:3306/bigdata", "root", "123456")
        val end: Long = System.currentTimeMillis()
        println(end - start)

        try {
          // 3. Prepare the statement ONCE per partition (the original code
          //    re-prepared it for every record) and batch the inserts to
          //    avoid one network round trip per row.
          val stat: PreparedStatement =
            con.prepareStatement("insert into card_avg_speed_flow values(?,?,?,?)")
          try {
            iter.foreach {
              case (card: Long, comDate: String, flow: Int, avgSpeed: Double) =>
                // 4. Bind the values for this row
                stat.setLong(1, card)
                stat.setString(2, comDate)
                stat.setInt(3, flow)
                stat.setDouble(4, avgSpeed)
                stat.addBatch()
            }
            // 5. Execute all queued inserts in a single batch
            stat.executeBatch()
          } finally {
            // Release the statement even if an insert fails
            stat.close()
          }
        } finally {
          // 6. Always close the connection, even on failure, so executors
          //    do not leak connections batch after batch
          con.close()
        }
      })
    })
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()

  }

}
