package com.atbeijing.bigdata.spark.core.rdd.operator.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

object Spark23_Oper_Req_1 {

    /**
     * Requirement: for each province, compute the Top-3 advertisements by click count.
     *
     * Input (data/agent.log) is space-separated; field 1 is the province id and
     * field 4 is the advertisement id (indices taken from the original code —
     * NOTE(review): verify against the actual log schema).
     */
    def main(args: Array[String]): Unit = {

        val conf = new SparkConf().setMaster("local[*]").setAppName("TransformOperator")
        val sc = new SparkContext(conf)

        val startTime = System.currentTimeMillis()

        val rdd : RDD[String] = sc.textFile("data/agent.log")

        // Key directly on the (province, ad) tuple instead of the fragile
        // "prv-ad" string concatenation: no split("-") round trip later, and
        // it cannot break if an id ever contains the '-' delimiter.
        val clickPairs : RDD[((String, String), Int)] = rdd.map(
            line => {
                val fields = line.split(" ")
                ((fields(1), fields(4)), 1)
            }
        )

        // Total clicks per (province, ad) pair.
        val clickCounts : RDD[((String, String), Int)] = clickPairs.reduceByKey(_+_)

        // Re-key by province so ads can be ranked within each province.
        val byProvince : RDD[(String, (String, Int))] = clickCounts.map {
            case ( (prv, ad), sum ) => (prv, (ad, sum))
        }

        // Why not groupByKey + sortBy per group:
        //   1. groupByKey shuffles every (ad, count) pair across the network;
        //   2. sorting a whole group on a single executor can OOM for a hot key.
        // aggregateByKey keeps at most 3 elements per key on the map side, so
        // only tiny Top-3 lists cross the shuffle boundary. An immutable List
        // accumulator replaces the original mutable ArrayBuffer, whose in-place
        // append was discarded anyway by the new buffer sortBy returned.
        val top3 : RDD[(String, List[(String, Int)])] = byProvince.aggregateByKey(List.empty[(String, Int)])(
            (acc, t) => {
                // Merge one value into the running Top-3, descending by count.
                (t :: acc).sortBy(_._2)(Ordering.Int.reverse).take(3)
            },
            (acc1, acc2) => {
                // Merge two partition-local Top-3 lists into one.
                (acc1 ++ acc2).sortBy(_._2)(Ordering.Int.reverse).take(3)
            }
        )

        top3.collect().foreach(println)

        sc.stop()
        // Wall-clock duration of the whole job, in milliseconds.
        println(System.currentTimeMillis() - startTime)
    }

}
