package com.gis.bigdata.spark.core.rdd.operator.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author LnnuUser
 * @create 2022-07-23 6:22 PM
 */
object Spark24_RDD_2 {

  /**
   * Counts ad clicks per (province, ad) pair from a space-separated agent log
   * and prints the top-3 ads for each province, ordered by province.
   *
   * Expected line shape (see sample below): 5+ space-separated fields where
   * field 1 is the province id and field 4 is the ad id
   * (presumably: timestamp province city user ad — confirm against the data source).
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local").setAppName("spark24")
    val sc: SparkContext = new SparkContext(sparkConf)

    // Sample record: 1516609143867 6 7 64 16
    val fileRDD: RDD[String] = sc.textFile("/home/lnnu/IdeaProjects/spark/datas/agent.log")

    // Step 1: ((prov, ad), 1) per click. Malformed lines (fewer than 5 fields)
    // are skipped instead of crashing the job with ArrayIndexOutOfBoundsException.
    val mapRDD: RDD[((String, String), Int)] = fileRDD
      .map(_.split(" "))
      .filter(_.length >= 5)
      .map(fields => ((fields(1), fields(4)), 1))

    // Step 2: total clicks per (prov, ad) pair.
    val reduceRDD: RDD[((String, String), Int)] = mapRDD.reduceByKey(_ + _)

    // Step 3: re-key by province: (prov, (ad, clicks)).
    // (Binder renamed from the original `prev` — the key's first element is the province.)
    val newReduceRDD: RDD[(String, (String, Int))] = reduceRDD.map {
      case ((prov, ad), nums) => (prov, (ad, nums))
    }

    // Step 4: group by province, keep the 3 ads with the highest click counts,
    // then sort by province so the printed output is deterministic and readable.
    val groupRDD: RDD[(String, Iterable[(String, Int)])] = newReduceRDD.groupByKey()
    val resultRDD: RDD[(String, List[(String, Int)])] = groupRDD.mapValues { ads =>
      ads.toList.sortBy(_._2)(Ordering.Int.reverse).take(3)
    }.sortByKey()

    resultRDD.collect().foreach(println)

    sc.stop()
  }

}
