package com.larry.spark.rdd.transform

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
 * Spark job: top-3 advert click counts per province.
 *
 * Reads lines shaped like "1516609143867 6 7 64 16"
 * (timestamp province city user advert), counts clicks per
 * (province, advert) pair, then keeps the three highest-count
 * adverts within each province via aggregateByKey.
 */
object RDD_Oper_Test_4 {

  def main(args: Array[String]): Unit = {
    // TODO: per-province top-3 advert clicks — count first, then group.

    val conf = new SparkConf().setMaster("local[*]").setAppName("req")
    val sc = new SparkContext(conf)

    val logLines = sc.textFile("data/agent.log")

    // "1516609143867 6 7 64 16"  ->  ((province, advert), 1)
    val clickPairs = logLines.map { line =>
      val fields = line.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // ((province, advert), 1) ... -> ((province, advert), totalClicks)
    val clickCounts = clickPairs.reduceByKey(_ + _)

    // ((province, advert), total) -> (province, (advert, total))
    val byProvince = clickCounts.map {
      case ((province, advert), total) => (province, (advert, total))
    }

    // For each province, keep only the three adverts with the most clicks.
    // The per-partition buffer never holds more than four entries before
    // truncation, so the repeated sort is cheap.
    val top3 = byProvince.aggregateByKey(ArrayBuffer[(String, Int)]())(
      (acc, entry) => {
        // Within a partition: add the entry, order by count descending, keep 3.
        acc += entry
        acc.sortWith(_._2 > _._2).take(3)
      },
      (left, right) => {
        // Across partitions: merge both candidate lists, re-rank, keep 3.
        left ++= right
        left.sortWith(_._2 > _._2).take(3)
      }
    )

    top3.collect().foreach(println)
    sc.stop()
  }

}
