package com.dxf.bigdata.D05_spark_again

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Input record example: "1516609143869 2 8 92 9"
 * Fields (space-separated): timestamp, province, city, user, ad.
 *
 * Computes the top-3 most-clicked ads per province.
 */
object 每个省每个广告点击前三 {

  def main(args: Array[String]): Unit = {

    // Local Spark context; raise the port-retry limit so several local
    // drivers can run side by side without UI-port collisions.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("top3")
    sparkConf.set("spark.port.maxRetries", "100")
    val sparkContext = new SparkContext(sparkConf)

    // One log line per click: "timestamp province city user ad".
    val lines: RDD[String] = sparkContext.textFile("datas/agent.log")

    // Key each click by (province, ad) with a count of 1.
    val clickPairs: RDD[((String, String), Int)] = lines.map { line =>
      val fields = line.split(" ")
      ((fields(1), fields(4)), 1)
    }

    // Total clicks per (province, ad) pair.
    val totals: RDD[((String, String), Int)] = clickPairs.reduceByKey(_ + _)

    // Re-key by province, carrying (ad, count) as the value.
    val byProvince: RDD[(String, (String, Int))] = totals.map {
      case ((province, ad), count) => (province, (ad, count))
    }

    // Collect every (ad, count) pair belonging to the same province.
    val grouped: RDD[(String, Iterable[(String, Int)])] = byProvince.groupByKey()

    // Sort each province's ads by click count descending, keep the top 3,
    // and print the result on the driver.
    grouped
      .mapValues(ads => ads.toList.sortBy(-_._2).take(3))
      .collect()
      .foreach(println)

    sparkContext.stop()
  }

}
