package top.jolyoulu.core.rdd.operator.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: JolyouLu
 * @Date: 2024/2/7 20:51
 * @Description
 */
/**
 * Case study: from an agent log (one event per line, fields:
 * timestamp province city user ad), compute the top 3 ads by click
 * count within each province and print one result row per province.
 */
object Spark024_RDD_Req1 {
  def main(args: Array[String]): Unit = {
    // Spark setup; local[*] runs locally using every available core.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("RDD")
    val context: SparkContext = new SparkContext(conf)

    // Resolve the bundled log file from the classpath and load it as lines.
    // NOTE(review): getResource returns null if the resource is missing,
    // which would surface here as an NPE — acceptable for this demo.
    val logPath: String = this.getClass.getClassLoader.getResource("datas/agent.log").toURI.getPath
    val lines: RDD[String] = context.textFile(logPath)

    // Single transformation pipeline:
    //   line                          -> ((province, ad), 1)        keyed click event
    //   reduceByKey                   -> ((province, ad), clicks)   clicks per (province, ad)
    //   map                           -> (province, (ad, clicks))   re-keyed by province
    //   groupByKey                    -> (province, [(ad, clicks)]) all ads per province
    //   mapValues(sort desc + take 3) -> (province, top-3 ads)      final ranking
    val top3PerProvince: RDD[(String, List[(String, Int)])] = lines
      .map { line =>
        val fields: Array[String] = line.split(" ")
        ((fields(1), fields(4)), 1)
      }
      .reduceByKey(_ + _)
      .map { case ((province, ad), clicks) => (province, (ad, clicks)) }
      .groupByKey()
      .mapValues(adCounts => adCounts.toList.sortBy(pair => -pair._2).take(3))

    // Bring the (small) result set back to the driver and print it.
    top3PerProvince.collect().foreach(println)

    // Release all Spark resources.
    context.stop()
  }

}
