package com.study.bigdata.spark.core.rdd.operator.transform

import org.apache.spark.{SparkConf, SparkContext}

object Scala21_RDD_Rep_1 {

  /**
   * For each province, compute the top-3 ads ranked by click count.
   *
   * Strategy (as the original comment intended): aggregate FIRST with
   * `reduceByKey` to shrink the data set, and only then group by province.
   * The previous implementation did `groupByKey` on the raw per-record
   * pairs, which shuffles every record and materializes each province's
   * full click list in memory — this version combines map-side instead.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName("RDD")
    val sc = new SparkContext(conf)

    // 1. Read the raw data. Each line is space-separated:
    //    timestamp province city user ad
    //    e.g. "1516609143867 6 7 64 16"
    val lines = sc.textFile("data/agent.log")

    // 2. Reshape each record into ((province, ad), 1) so the click count
    //    can be reduced per (province, ad) pair.
    val provinceAdToOne = lines.map { line =>
      val fields = line.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // 3. Aggregate clicks per (province, ad). reduceByKey combines on the
    //    map side, so only one (count) value per key crosses the shuffle.
    val provinceAdCount = provinceAdToOne.reduceByKey(_ + _)

    // 4. Restructure to (province, (ad, count)) and group by province —
    //    each province now holds at most one entry per distinct ad.
    val groupedByProvince = provinceAdCount
      .map { case ((province, ad), count) => (province, (ad, count)) }
      .groupByKey()

    // 5. Within each province, sort ads by click count descending and
    //    keep the top 3.
    val top3 = groupedByProvince.mapValues { adCounts =>
      adCounts.toList.sortBy(_._2)(Ordering.Int.reverse).take(3)
    }

    top3.collect().foreach(println)

    sc.stop()
  }

}
