package com.atguigu1.core.operator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Example: top-3 advertisements per province, computed with
 * reduceByKey + groupByKey over (province, ad) click pairs.
 *
 * @time: 2021-03-12 11:45
 * @author: baojinlong
 **/
object Spark29PracticeDemo {

  /**
   * Reads an agent click log and prints, for every province, the three
   * advertisements with the highest click counts.
   *
   * Pipeline: raw lines -> ((province, ad), 1) -> per-key sums ->
   * re-keyed by province -> grouped -> top-3 per group.
   */
  def main(args: Array[String]): Unit = {
    // Local-mode Spark context for this demo.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("rdd")
    val sc = new SparkContext(sparkConf)

    // 1. Load the raw log: each line is "timestamp province city user ad".
    val lines: RDD[String] = sc.textFile("datas/agent.log")

    // 2. Reshape each line into a countable pair: ((province, ad), 1).
    val pairs: RDD[((String, String), Int)] = lines.map { line =>
      val fields: Array[String] = line.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // 3. Sum the clicks per (province, ad) key.
    val clickTotals: RDD[((String, String), Int)] = pairs.reduceByKey(_ + _)

    // 4. Re-key by province: ((province, ad), total) => (province, (ad, total)).
    val byProvince: RDD[(String, (String, Int))] = clickTotals.map {
      case ((province, ad), total) => (province, (ad, total))
    }

    // 5. Collect all (ad, total) pairs under their province:
    //    (province, [(ad1, total1), (ad2, total2), ...]).
    val grouped: RDD[(String, Iterable[(String, Int)])] = byProvince.groupByKey()

    // 6. Within each province, rank ads by click count (descending) and keep the top 3.
    val top3: RDD[(String, List[(String, Int)])] = grouped.mapValues { adTotals =>
      adTotals.toList.sortBy(_._2)(Ordering.Int.reverse).take(3)
    }

    // 7. Pull the results to the driver and print them.
    top3.collect.foreach(println)
    sc.stop()
  }
}
