package com.atguigu.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object TestPratice {

  /**
   * Computes the top-3 ads by click count per province from a log whose
   * lines are space-separated: timestamp, province, city, user, ad.
   *
   * @param args optional; args(0) is the input file path. Previously the
   *             path was hard-coded as "" which fails when the job runs.
   */
  def main(args: Array[String]): Unit = {
    // Create SparkConf with the app name; local[*] uses all local cores.
    val conf: SparkConf = new SparkConf().setAppName("WC").setMaster("local[*]")

    // SparkContext is the entry point for submitting the Spark app.
    val sc: SparkContext = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // Read the input file; path comes from the CLI instead of a
    // hard-coded empty string (which would throw at job execution time).
    val inputPath: String = if (args.nonEmpty) args(0) else ""
    val rdd: RDD[String] = sc.textFile(inputPath)

    // Finest granularity we need: key each record by (province, ad).
    // Fields: 0=timestamp, 1=province, 2=city, 3=user, 4=ad.
    val provinceAdAndCount: RDD[((String, String), Int)] = rdd.map { line =>
      val fields: Array[String] = line.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // Aggregate: total clicks per (province, ad).
    val provinceAdAndSum: RDD[((String, String), Int)] =
      provinceAdAndCount.reduceByKey(_ + _)

    // Re-key by province so each province carries its (ad, count) pairs.
    val groupRdd: RDD[(String, Iterable[(String, Int)])] = provinceAdAndSum
      .map { case ((province, ad), sum) => (province, (ad, sum)) }
      .groupByKey()

    // Sort each province's ads by count descending and keep the top 3.
    val result: RDD[(String, List[(String, Int)])] = groupRdd.mapValues(
      _.toList.sortBy(-_._2).take(3)
    )

    // Trigger the job with an action; without this, the lazy
    // transformations above never execute before sc.stop().
    result.collect().foreach(println)

    // Release cluster resources.
    sc.stop()
  }

}
