package com.xbai.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Exercise: compute the TOP-3 most-clicked advertisements per province.
  * Input record layout: timestamp province city user ad — fields separated by a single space.
  *
  * Usage: the first CLI argument, when present, overrides the input path
  * (defaults to "in/agent.log" for backward compatibility).
  *
  * @author xbai
  * @Date 2020/12/31
  */
object Practice {

  /** Number of top advertisements to keep per province. */
  private val TopN = 3

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("create")
    val sc = new SparkContext(conf)

    // Input path is parameterized; falls back to the original hard-coded location.
    val inputPath: String = if (args.nonEmpty) args(0) else "in/agent.log"
    val lineRDD: RDD[String] = sc.textFile(inputPath)

    // Finest-grained aggregation key: ((province, ad), 1).
    // Malformed lines (fewer than 5 fields) are skipped instead of crashing the
    // job with ArrayIndexOutOfBoundsException.
    val proAdToOneRDD: RDD[((String, String), Int)] = lineRDD
      .map(_.split(" "))
      .filter(_.length >= 5)
      .map(fields => ((fields(1), fields(4)), 1))

    // Total clicks per (province, ad): ((province, ad), count).
    val proAdToCountRDD: RDD[((String, String), Int)] = proAdToOneRDD.reduceByKey(_ + _)

    // Re-key by province: (province, (ad, count)).
    val proToAdCountRDD: RDD[(String, (String, Int))] =
      proAdToCountRDD.map { case ((province, ad), count) => (province, (ad, count)) }

    // Top-N per province. aggregateByKey keeps at most TopN entries per key per
    // partition, so only those survive the shuffle — unlike groupByKey, which
    // would ship every (ad, count) record across the network before sorting.
    val result: RDD[(String, List[(String, Int)])] =
      proToAdCountRDD.aggregateByKey(List.empty[(String, Int)])(
        // Fold one value into a partition-local top list.
        (acc, adCount) => (adCount :: acc).sortBy(-_._2).take(TopN),
        // Merge two partition-local top lists.
        (left, right) => (left ::: right).sortBy(-_._2).take(TopN)
      )

    result.collect().foreach(println)
    sc.stop()
  }
}
