import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.text.SimpleDateFormat
import java.util.Date

/**
 * 7.2  Daily top-3 popular advertisements per region
 */
object Request1 {

  // Pattern used to bucket click timestamps by calendar day.
  private val DatePattern = "yyyy-MM-dd"
  // Number of top advertisements to keep per (day, area).
  private val TopN = 3

  /**
   * Daily top-3 popular advertisements per region.
   *
   * Consumes click events from Kafka in the form
   * "timestamp,area,city,userId,advId" (e.g. "1584271384370,华南,广州,100,1"),
   * keeps a running click count per (day, area, ad) across batches with
   * updateStateByKey, and prints the top-3 ads for every (day, area) each batch.
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode using all available cores.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Request1")
    // One micro-batch every 3 seconds.
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    // Checkpointing is mandatory for stateful operators (updateStateByKey).
    // NOTE(review): hard-coded local Windows path — consider externalizing.
    ssc.checkpoint("D:\\IdeaProjects\\spark-api\\spark-api-realtime\\checkpoint")

    // Kafka consumer parameters.
    val brokers: String = "hadoop102:9092,hadoop103:9092,hadoop104:9092"
    val topic: String = "my-ads-bak"
    val group: String = "bigdata"
    val deserialization: String = "org.apache.kafka.common.serialization.StringDeserializer"
    val kafkaParams: Map[String, String] = Map(
      ConsumerConfig.GROUP_ID_CONFIG -> group,
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> brokers,
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> deserialization,
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> deserialization
    )

    // Direct stream of (key, value) records; only the value carries the event.
    val kafkaDS: InputDStream[(String, String)] =
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Set(topic))
    val dataDS: DStream[String] = kafkaDS.map(_._2)
    // Debug output of the raw events consumed from Kafka.
    dataDS.print()

    // Requirement 1: popular advertisements per region per day.
    // Parse each event into ("yyyy-MM-dd_area_advId", 1).
    // mapPartitions so the (non-thread-safe, relatively expensive)
    // SimpleDateFormat is built once per partition instead of once per record.
    val mapDS: DStream[(String, Int)] = dataDS.mapPartitions { iter =>
      val sdf = new SimpleDateFormat(DatePattern)
      iter.map { line =>
        val fields: Array[String] = line.split(",")
        // Field 0: epoch millis -> day string; field 1: area; field 4: ad id.
        val day: String = sdf.format(new Date(fields(0).toLong))
        val area: String = fields(1)
        val adv: String = fields(4)
        (day + "_" + area + "_" + adv, 1)
      }
    }

    // Running total per key across all batches; `buffer` holds the
    // previously checkpointed state (None on first sight of a key).
    val updateDS: DStream[(String, Int)] = mapDS.updateStateByKey {
      (seq: Seq[Int], buffer: Option[Int]) =>
        Some(buffer.getOrElse(0) + seq.sum)
    }

    // Re-key to (day_area, (advId, totalClicks)) so ads can be ranked per day/area.
    val mapDS1: DStream[(String, (String, Int))] = updateDS.map {
      case (key, sum) =>
        val parts: Array[String] = key.split("_")
        (parts(0) + "_" + parts(1), (parts(2), sum))
    }

    // Group all ads belonging to the same (day, area).
    val groupDS: DStream[(String, Iterable[(String, Int)])] = mapDS1.groupByKey()

    // Keep the TopN ads with the highest click counts, descending.
    val resDS: DStream[(String, List[(String, Int)])] = groupDS.mapValues { ads =>
      ads.toList.sortBy(-_._2).take(TopN)
    }

    resDS.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
