package com.alison.sparkstream.action

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}

import java.text.SimpleDateFormat
import java.time.format.DateTimeFormatter
import java.time.{Instant, ZoneId}
import java.util.Date

object E4_oneHour {

  /**
   * Entry point: consumes ad-click events from Kafka and prints, every batch,
   * the per-ad click counts bucketed by HH:mm over the last hour.
   */
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration — local mode for this demo job.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("RealTimeApp")

    // 2. StreamingContext with a 3-second batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(3))

    // 3. Read Kafka data. Sample record: "1583288137305 华南 深圳 4 3"
    //    i.e. space-separated: timestamp area city userid adid.
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] =
      MyKafkaUtil.getKafkaStream("spark-topic", ssc)

    // 4. Parse each line into an Ads_log case class.
    //    Malformed records (fewer than 5 fields, or a non-numeric timestamp)
    //    are dropped via flatMap instead of throwing and killing the batch.
    val adsLogDStream: DStream[Ads_log] = kafkaDStream.flatMap(record => {
      val arr: Array[String] = record.value().split(" ")
      if (arr.length >= 5 && arr(0).nonEmpty && arr(0).forall(_.isDigit))
        Some(Ads_log(arr(0).toLong, arr(1), arr(2), arr(3), arr(4)))
      else
        None
    })

    // 5. Per-ad, per-minute click totals over the last hour.
    val adToHmCountListDStream: DStream[(String, List[(String, Long)])] =
      LastHourAdCountHandler.getAdHourMintToCount(adsLogDStream)

    // 6. Print each batch's result to stdout.
    adToHmCountListDStream.print()

    // 7. Start the streaming job and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }

  // One ad-click event: timestamp is epoch millis; the remaining fields are the
  // raw string tokens from the Kafka record (area, city, user id, ad id).
  case class Ads_log(timestamp: Long, area: String, city: String, userid: String, adid: String)

  object LastHourAdCountHandler {
    // Time formatter for the HH:mm bucket key. DateTimeFormatter is immutable and
    // thread-safe, unlike SimpleDateFormat, which as a shared singleton field would
    // be a data race when concurrent tasks run in the same executor JVM.
    private val hmFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("HH:mm")

    /**
     * Counts ad clicks per (adid, HH:mm) bucket over the last hour.
     *
     * @param filterAdsLogDStream stream of parsed ad-click events
     * @return stream of (adid, list of (HH:mm, count)) pairs, each ad's list
     *         sorted lexicographically by the HH:mm key
     */
    def getAdHourMintToCount(filterAdsLogDStream: DStream[Ads_log]):
    DStream[(String, List[(String, Long)])] = {
      // 1. One-hour sliding window (slide defaults to the batch interval).
      val windowAdsLogDStream: DStream[Ads_log] =
        filterAdsLogDStream.window(Minutes(60))

      // 2. ads_log => ((adid, hm), 1L)
      val adHmToOneDStream: DStream[((String, String), Long)] =
        windowAdsLogDStream.map { adsLog =>
          // Epoch millis -> "HH:mm" in the executor's default time zone
          // (matches the original SimpleDateFormat behavior).
          val hm: String = Instant.ofEpochMilli(adsLog.timestamp)
            .atZone(ZoneId.systemDefault())
            .format(hmFormatter)
          ((adsLog.adid, hm), 1L)
        }

      // 3. ((adid, hm), 1L) => ((adid, hm), sum)
      val adHmToCountDStream: DStream[((String, String), Long)] =
        adHmToOneDStream.reduceByKey(_ + _)

      // 4. ((adid, hm), sum) => (adid, (hm, sum))
      val adToHmCountDStream: DStream[(String, (String, Long))] =
        adHmToCountDStream.map {
          case ((adid, hm), count) => (adid, (hm, count))
        }

      // 5. Group by adid, then sort each ad's minute buckets by the HH:mm key.
      adToHmCountDStream.groupByKey()
        .mapValues(iter => iter.toList.sortBy(_._1))
    }
  }

  object MyKafkaUtil {
    // Kafka consumer configuration shared by every stream this helper builds.
    val kafkaParam = Map(
      "bootstrap.servers" -> "localhost:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      // Consumer group id.
      "group.id" -> "commerce-consumer-group",
      // When no committed offset exists (or the stored one is out of range),
      // "latest" resets the consumer to the newest available offset.
      "auto.offset.reset" -> "latest",
      // true: offsets auto-commit in the background — simple, but records may be
      // lost if Kafka fails; false would require manual offset management.
      "enable.auto.commit" -> (true: java.lang.Boolean)
    )

    /**
     * Creates a direct Kafka input stream subscribed to a single topic.
     *
     * LocationStrategies.PreferConsistent distributes partitions evenly across
     * all executors; ConsumerStrategies.Subscribe configures the consumer to
     * subscribe to the given topic list with `kafkaParam`.
     *
     * @param topic Kafka topic to subscribe to
     * @param ssc   the StreamingContext to attach the stream to
     * @return an InputDStream of raw ConsumerRecords (key and value as String)
     */
    def getKafkaStream(topic: String, ssc: StreamingContext):
    InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParam)
      )
  }
}
