package com.atguigu.realtime.app

import com.atguigu.realtime.bean.AdsInfo
import com.atguigu.realtime.util.{MyApp, RedisUtil}
import org.apache.spark.streaming.Minutes
import org.apache.spark.streaming.dstream.DStream
import org.json4s.jackson.JsonMethods
import redis.clients.jedis.Jedis

/**
 * description ：Daily top-3 popular ads per area (每天每地区热门广告 Top3)
 * author      ：剧情再美终是戏 
 * mail        : 13286520398@163.com
 * date        ：Created in 2020/1/17 10:05
 * modified By ：
 * version:    : 1.0
 */
object AreaAdsClickTop3App extends MyApp {

  /**
   * Computes the daily top-3 ads by click count per area over a sliding
   * 24-hour (1440-minute) window and persists the result to a Redis hash.
   *
   * Redis layout (written via HSET):
   *   key   -> "area:day:top3:" + dayString     e.g. area:day:top3:2020-01-17
   *   field -> area name                        e.g. 华东
   *   value -> JSON array of (adsId, count),    e.g. [["3",387],["4",372],["5",370]]
   *
   * @param adsInfo stream of parsed ad-click events
   */
  override def doSomething(adsInfo: DStream[AdsInfo]): Unit = {

    // Key each click in the last 24h by (day, area, adsId) with weight 1,
    // then sum. Because the window already spans the whole day, a plain
    // reduceByKey over the window is sufficient — no updateStateByKey /
    // checkpointed state is needed.
    val clickCounts: DStream[((String, String, String), Int)] = adsInfo
      .window(Minutes(1440))
      .map(ai => ((ai.dayString, ai.area, ai.adsId), 1))
      .reduceByKey(_ + _)

    // Re-key by (day, area) so ads can be ranked within each day/area group.
    val byDayArea: DStream[((String, String), (String, Int))] = clickCounts.map {
      case ((dayString, area, adsId), count) => ((dayString, area), (adsId, count))
    }

    // Top 3 ads per (day, area), ordered by click count descending.
    val top3: DStream[((String, String), List[(String, Int)])] = byDayArea
      .groupByKey
      .mapValues(_.toList.sortBy(_._2)(Ordering.Int.reverse).take(3))

    // Group by day so each micro-batch writes one Redis hash per day
    // in a single pass over the collected rows.
    val byDay: DStream[(String, Iterable[(String, List[(String, Int)])])] = top3.map {
      case ((dayString, area), list) => (dayString, (area, list))
    }.groupByKey

    byDay.foreachRDD { rdd =>
      // collect() is safe here: after aggregation there are at most
      // (#days-in-window x #areas) rows — a handful of records.
      val rows = rdd.collect()
      if (rows.nonEmpty) {
        val redisClient: Jedis = RedisUtil.getJedisClient
        try {
          // Hoisted out of the inner loop: brings the implicit
          // List[(String, Int)] -> JValue conversion into scope once.
          import org.json4s.JsonDSL._
          rows.foreach { case (dayString, areas) =>
            // FIX: dropped the type-erased pattern `li: Seq[(String, Int)]`
            // (unchecked at runtime); the static type already guarantees it.
            areas.foreach { case (area, topAds) =>
              redisClient.hset(
                "area:day:top3:" + dayString,
                area,
                JsonMethods.compact(JsonMethods.render(topAds))
              )
            }
          }
        } finally {
          // FIX: close the connection even when hset/render throws;
          // the original leaked a pooled Jedis connection on any failure.
          redisClient.close()
        }
      }
    }
  }
}

/*
Daily top-3 popular ads per area — pipeline sketch:
1. ((2020-1-17, 华南, adsId1), 1), ((2020-1-17, 华南, adsId2), 1), ... --> reduceByKey
2. ((2020-1-17, 华南, adsId1), count), ((2020-1-17, 华南, adsId2), count), ...
   --> groupBy (2020-1-17, 华南), sort by click count descending, take top 3
3. (2020-1-17, 华南) -> ((adsId1, 100), (adsId2, 90), ...)
 */
