package com.atbeijing.bigdata.spark.mytest.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

object Spark_WordCount {
  /**
   * Reads click logs from `data/agent.log` — one record per line in the form
   * "timestamp province city user ad" (space-separated) — and prints the
   * top-3 most-clicked ads for each province.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("word")
    val sc = new SparkContext(conf)
    try {
      val lines: RDD[String] = sc.textFile("data/agent.log")

      // Map each line to ((province, ad), 1). A tuple key avoids the fragile
      // "province-ad" string concat + split("-") round-trip, which would corrupt
      // keys if a field ever contained a hyphen.
      val pairs: RDD[((String, String), Int)] = lines.map { line =>
        val fields: Array[String] = line.split(" ")
        ((fields(1), fields(4)), 1)
      }

      // Sum clicks per (province, ad): ((province, ad), clickCount)
      val counts: RDD[((String, String), Int)] = pairs.reduceByKey(_ + _)

      // Re-key by province: (province, (ad, clickCount))
      val byProvince: RDD[(String, (String, Int))] = counts.map {
        case ((province, ad), count) => (province, (ad, count))
      }

      // Keep only the 3 largest counts per province *while* aggregating.
      // Unlike groupByKey + sort, each partition ships at most 3 entries per key
      // across the shuffle, bounding memory and network cost.
      val top3: RDD[(String, ArrayBuffer[(String, Int)])] =
        byProvince.aggregateByKey(ArrayBuffer[(String, Int)]())(
          (buffer, adCount) => {
            // Within a partition: add the entry, retain the 3 highest counts.
            buffer.append(adCount)
            buffer.sortBy(_._2)(Ordering.Int.reverse).take(3)
          },
          (left, right) => {
            // Across partitions: merge candidate lists, retain the 3 highest counts.
            left.appendAll(right)
            left.sortBy(_._2)(Ordering.Int.reverse).take(3)
          }
        )

      top3.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even when the job throws.
      sc.stop()
    }
  }
}
