package com.bdqn.spark.chapter05.practise

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Spark24_RDD_Req {
  /**
   * Computes the top-3 adverts per province by click count.
   *
   * Input file `input/agent.log` contains one space-separated record per line:
   *   timestamp province city user advert
   *   e.g. 1516609143867 6 7 64 16
   *
   * Prints one `(province, List((advert, clicks)))` tuple per province, with
   * the list holding the three adverts with the highest counts, descending.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("advert-test")
    val sc = new SparkContext(sparkConf)

    val fileRDD = sc.textFile("input/agent.log")

    // line -> ((province, advert), 1)
    // Guard against blank/malformed lines: anything with fewer than five
    // fields would otherwise throw ArrayIndexOutOfBoundsException and
    // fail the whole job.
    val provinceAdvertRDD: RDD[((String, String), Int)] = fileRDD
      .map(_.split(" "))
      .filter(_.length >= 5)
      .map(fields => ((fields(1), fields(4)), 1))

    // ((province, advert), 1) -> ((province, advert), clickCount)
    val provinceAdvertSumRDD: RDD[((String, String), Int)] =
      provinceAdvertRDD.reduceByKey(_ + _)

    // ((province, advert), sum) -> (province, (advert, sum))
    val provinceRDD: RDD[(String, (String, Int))] = provinceAdvertSumRDD.map {
      case ((province, advert), sum) => (province, (advert, sum))
    }

    // Group by province. groupByKey is acceptable here because counts were
    // already reduced, so each province holds at most one entry per advert.
    val provinceGroupedRDD: RDD[(String, Iterable[(String, Int)])] =
      provinceRDD.groupByKey()

    // Keep only the three adverts with the most clicks per province,
    // sorted by click count descending.
    val resultRDD: RDD[(String, List[(String, Int)])] =
      provinceGroupedRDD.mapValues(
        _.toList.sortBy(_._2)(Ordering.Int.reverse).take(3)
      )

    resultRDD.collect().foreach(println)
    sc.stop()
  }
}
