package com.bus.stream

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object BusStreamAnalysis {

  /** Pipe separator used both in the inbound Kafka records and in the
    * intermediate "key|avg" strings produced by the aggregations.
    */
  private val FieldSeparator = "\\|"

  def main(args: Array[String]): Unit = {
    // Local mode with two threads: the direct Kafka stream needs at least one
    // core for reading and one for processing.
    val conf = new SparkConf().setMaster("local[2]").setAppName("BusStreamAnalysis")
    // Micro-batch interval: one batch every 300 seconds (5 minutes).
    val ssc = new StreamingContext(conf, Seconds(300))

    // Kafka consumer configuration.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop100:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "gname",
      // When no committed offset exists for this group, start from the newest records.
      "auto.offset.reset" -> "latest",
      // Auto-commit is disabled; Kafka >= 0.10 would otherwise store offsets in
      // the __consumer_offsets topic (pre-0.10 used ZooKeeper).
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Direct (receiver-less) stream over the "mytopic" topic.
    val kafkaDStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set("mytopic"), kafkaParams)
    )

    getBusNum(kafkaDStream)
    getStationTopN(kafkaDStream)

    ssc.start()
    ssc.awaitTermination()
  }

  /** Splits a pipe-delimited record and returns (fields(keyIdx), fields(1).toInt).
    *
    * Malformed records (too few fields, or a non-numeric count in field 1)
    * yield None instead of throwing, so one bad Kafka message cannot kill the
    * whole streaming job.
    *
    * @param value  raw pipe-delimited record value
    * @param keyIdx index of the field used as the aggregation key
    */
  private def parseRecord(value: String, keyIdx: Int): Option[(String, Int)] = {
    val fields = value.split(FieldSeparator, -1)
    if (fields.length <= math.max(keyIdx, 1)) None
    else scala.util.Try(fields(keyIdx) -> fields(1).toInt).toOption
  }

  /** Per batch, computes the average passenger count per bus (key = field 0,
    * count = field 1 of each record) and upserts one row per bus into MySQL.
    */
  def getBusNum(kafkaDStream: InputDStream[ConsumerRecord[String, String]]): Unit = {
    // (bus, count) pairs; malformed records are silently dropped.
    val busCounts = kafkaDStream.flatMap(record => parseRecord(record.value(), keyIdx = 0))

    // Average the counts per bus within the batch, rendered as "bus|avg".
    val busAvg = busCounts.groupByKey().map { case (bus, counts) =>
      val lst = counts.toList
      bus + "|" + (lst.sum.toFloat / lst.size)
    }

    busAvg.foreachRDD { rdd =>
      // One connection per partition; closed in finally even if an update throws.
      rdd.foreachPartition { records =>
        val conn = MySQLUtil.getConnection()
        try {
          records.foreach { record =>
            val item = record.split(FieldSeparator, -1)
            Dao.update(conn, item(0), item(1).toFloat)
          }
        } finally {
          MySQLUtil.closeConnection(conn)
        }
      }
    }
  }

  /** Per batch, computes the average passenger count per station (key = field 3,
    * count = field 1) and replaces the MySQL table contents with the top 5
    * stations by average.
    */
  def getStationTopN(kafkaDStream: InputDStream[ConsumerRecord[String, String]]): Unit = {
    // (station, count) pairs; malformed records are silently dropped.
    val stationCounts = kafkaDStream.flatMap(record => parseRecord(record.value(), keyIdx = 3))

    // Average the counts per station within the batch, rendered as "station|avg".
    val stationAvg = stationCounts.groupByKey().map { case (station, counts) =>
      val lst = counts.toList
      station + "|" + (lst.sum.toFloat / lst.size)
    }

    stationAvg.foreachRDD { rdd =>
      // BUG FIX: sort by the NUMERIC average, not its string form — the old
      // lexicographic sort ranked "9.5" above "10.2", corrupting the top N.
      // Descending order (ascending = false) keeps the original ranking intent.
      val sorted = rdd.coalesce(1).sortBy(_.split(FieldSeparator, -1)(1).toFloat, ascending = false)

      // Clear the previous batch's top-N before inserting the new one.
      // NOTE(review): Dao.delete() takes no connection — presumably it manages
      // its own; confirm against the Dao implementation.
      Dao.delete()
      val conn = MySQLUtil.getConnection()
      try {
        sorted.take(5).foreach { record =>
          val item = record.split(FieldSeparator, -1)
          Dao.insert(conn, item(0), item(1).toFloat)
        }
      } finally {
        MySQLUtil.closeConnection(conn)
      }
    }
  }
}
