package com.catmiao.spark.rdd.operator.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @title: RDD_Operator_01_transform
 * @projectName spark_study
 * @description: TODO
 * @author ChengMiao
 * @date 2024/2/2 11:48
 */
object RDD_Operator_22_transform_last {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("rdd")
    val sc = new SparkContext(conf)

    /**
     * Input sample: 1516609143867 6 7 64 16
     * Fields: timestamp province city user ad
     *
     * Goal: for each province, rank its ads by click count and keep the top 3.
     */

    val lines: RDD[String] = sc.textFile("datas/agent.log")

    // Step 1: parse each line into ((province, ad), 1).
    val provinceAdOnes: RDD[((String, String), Int)] = lines.map { line =>
      val fields = line.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // Step 2: sum the clicks for every (province, ad) pair.
    val provinceAdCounts: RDD[((String, String), Int)] =
      provinceAdOnes.reduceByKey(_ + _)

    // Step 3: re-key by province alone: (province, (ad, totalClicks)).
    val byProvince: RDD[(String, (String, Int))] = provinceAdCounts.map {
      case ((province, ad), count) => (province, (ad, count))
    }

    // Step 4: collect all (ad, totalClicks) pairs under their province.
    val grouped: RDD[(String, Iterable[(String, Int)])] = byProvince.groupByKey()

    // Step 5: inside each province, sort ads by click count descending and take the top 3.
    val top3PerProvince: RDD[(String, List[(String, Int)])] = grouped.mapValues { ads =>
      ads.toList.sortBy(_._2)(Ordering.Int.reverse).take(3)
    }

    top3PerProvince.collect().foreach(println)

    sc.stop()
  }

}
