package com.atguigu0.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description: xxx
 * @time: 2020/6/11 14:41
 * @author: baojinlong
 **/
/**
 * Computes the top-N most-clicked ads per province from an agent click log.
 *
 * Input record format (space-separated): timestamp province city user ad
 * Output: one line per province with its N highest click-count (ad, count) pairs.
 */
object RddExercise {

  /** Number of top ads to keep per province. */
  private val TopN = 3

  /** Default input path, used when no CLI argument is supplied. */
  private val DefaultInputPath = "E:/test-data/input/agent.log"

  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("myWordCount").setMaster("local[*]")
    // Transformations below are lazy; no job runs until the collect() action.
    val sc: SparkContext = new SparkContext(sparkConf)

    // Input path may be passed as the first CLI argument; falls back to the
    // original hard-coded default so existing invocations keep working.
    val inputPath: String = args.headOption.getOrElse(DefaultInputPath)
    val lines: RDD[String] = sc.textFile(inputPath)

    // Emit ((province, ad), 1) for every click record.
    // fields(1) = province, fields(4) = ad (see record format above).
    val proAdToOne: RDD[((String, String), Int)] = lines.map { record =>
      val fields = record.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // Total clicks per (province, ad) pair: ((province, ad), count).
    val proAdToCount: RDD[((String, String), Int)] = proAdToOne.reduceByKey(_ + _)

    // Re-key by province alone: (province, (ad, count)).
    val proToAdCount: RDD[(String, (String, Int))] =
      proAdToCount.map { case ((pro, ad), count) => (pro, (ad, count)) }

    // Group all (ad, count) pairs per province: (province, Iterable[(ad, count)]).
    // NOTE: data is already aggregated (one entry per distinct ad), so the
    // groupByKey shuffle volume is bounded by the number of distinct ads.
    val proToAdCountList: RDD[(String, Iterable[(String, Int)])] = proToAdCount.groupByKey()

    // Keep the TopN ads with the highest click counts per province,
    // sorted by count descending.
    val result: RDD[(String, List[(String, Int)])] =
      proToAdCountList.mapValues(_.toList.sortBy(-_._2).take(TopN))

    result.collect().foreach(println)

    sc.stop()
    println("ok-end")
  }
}
