package com.atguigu.sparkcore.day02.kv

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Author atguigu
 * Date 2020/10/28 16:12
 */
object RDDPractice {
    /**
     * Counts ad clicks per (province, ad) pair from a space-separated log file
     * and prints the top-3 ads for each province, ordered by province id.
     *
     * Log line format (space-separated fields): the province id is field 1
     * (0-based) and the ad id is field 4.
     *
     * @param args optional; args(0) overrides the input file path
     *             (defaults to "c:/agent.log" for backward compatibility)
     */
    def main(args: Array[String]): Unit = {
        // Allow the input path to be supplied on the command line; the original
        // hard-coded path remains the default so existing usage is unchanged.
        val inputPath = if (args.nonEmpty) args(0) else "c:/agent.log"

        val conf: SparkConf = new SparkConf().setAppName("RDDPractice").setMaster("local[2]")
        val sc: SparkContext = new SparkContext(conf)
        try {
            val lineRDD = sc.textFile(inputPath)

            // ((province, ad), 1) for every click record
            val proAdsAndOne = lineRDD.map(line => {
                val split: Array[String] = line.split(" ")
                ((split(1), split(4)), 1)
            })

            // ((province, ad), totalClicks)
            val proAdsAndCount: RDD[((String, String), Int)] = proAdsAndOne.reduceByKey(_ + _)

            // Re-key to (province, (ad, totalClicks)) so we can group by province.
            val proAndAdsCount = proAdsAndCount.map {
                case ((pro, ads), count) => (pro, (ads, count))
            }

            // NOTE: groupByKey materializes every (ad, count) pair of a province
            // in memory at once and can OOM with many distinct ads per province;
            // acceptable for this exercise's data volume.
            val proAndAdsCountGrouped: RDD[(String, Iterable[(String, Int)])] = proAndAdsCount.groupByKey()

            val result = proAndAdsCountGrouped.mapValues((it: Iterable[(String, Int)]) => {
                // Sort descending with an explicit reverse Ordering instead of
                // sortBy(-_._2): negating Int.MinValue overflows back to
                // Int.MinValue and would mis-sort that value.
                it.toList.sortBy(_._2)(Ordering.Int.reverse).take(3)
            }).sortBy(_._1.toInt) // province ids are numeric strings in this data set

            println(result.dependencies)
            result.collect.foreach(println)
            // Keep the JVM alive so the Spark web UI (localhost:4040) stays inspectable.
            Thread.sleep(100000)
        } finally {
            // Always release the SparkContext, even if the job throws.
            sc.stop()
        }
    }
}

/*

倒推法 (working backwards through the pipeline):
=> RDD[line]                              map
=> RDD[((省份, 广告1), 1)]                 reduceByKey
=> RDD[((省份, 广告1), 1000)]              map
=> RDD[(省份, (广告1, 1000))]              groupByKey
=> RDD[(省份, List((广告1, 1000), (广告10, 200), ...))]


*/