package com.zt.bigdata.spark.dataalgorithms.chapter03

import com.zt.bigdata.template.spark.BasicTemplate

import scala.collection.SortedMap

/**
  * Classic "Top N with non-unique keys" example: input lines repeat the same
  * key, so values are summed per key before ranking the top N totals.
  */
class Top10NonUnique extends BasicTemplate[Parameter] {

  /**
    * Finds the top-N names by aggregated value. Keys are NOT unique in the
    * input: the same name may appear on many lines, so its values are summed
    * first. Two equivalent implementations are demonstrated:
    *   1. reduceByKey + per-partition local top-N merged on the driver
    *   2. combineByKey + sortByKey (the concise approach)
    *
    * @param parameter carries the input path, top-N size and Spark settings
    */
  override def process(parameter: Parameter): Unit = {
    val inputFile = parameter.inputFile
    val spark = buildSparkSession(parameter)

    // Broadcast N once so every executor task shares one read-only copy.
    val topN = spark.sparkContext.broadcast[Int](parameter.topN)

    val input = spark.sparkContext.textFile(inputFile)

    //------------------------------------------------
    // each input line/record has the following format:
    // name, time, value
    //-------------------------------------------------
    // x,2,9 -> ("x", 2): the SECOND field is what gets aggregated here.
    val rdd = input.coalesce(9)

    val kv = rdd.map(line => {
      val fields = line.split(",")
      (fields(0), fields(1).toInt)
    })

    // Sum all values per key so keys are unique before ranking.
    val uniqueKeys = kv.reduceByKey(_ + _)
    uniqueKeys.collect().foreach(println)

    // Per-partition local top-N. The map is keyed by the aggregated total and
    // holds a List of names: distinct keys that share the same total are kept,
    // instead of silently overwriting each other as SortedMap[Int, String]
    // (total -> single name) would.
    val partitions = uniqueKeys.mapPartitions(iter => {
      var localTopN = SortedMap.empty[Int, List[String]]
      iter.foreach { case (name, total) =>
        localTopN += (total -> (name :: localTopN.getOrElse(total, Nil)))
        if (localTopN.size > topN.value)
          localTopN = localTopN.takeRight(topN.value)
      }
      localTopN.iterator
    })

    // Merge the per-partition winners on the driver, again preserving ties,
    // then keep only the N largest totals.
    val finalTopN = partitions
      .collect()
      .foldLeft(SortedMap.empty[Int, List[String]]) {
        case (acc, (total, names)) =>
          acc + (total -> (names ::: acc.getOrElse(total, Nil)))
      }
      .takeRight(topN.value)

    finalTopN.foreach(println)

    // Concise approach: combineByKey sums per key, then swap to (total, name)
    // and sort descending; groupByKey naturally keeps tied names together.
    val createCombiner = (v: Int) => v
    val mergeValue = (a: Int, b: Int) => a + b
    val moreConciseApproach = kv.combineByKey(createCombiner, mergeValue, mergeValue)
      .map(_.swap)
      .groupByKey()
      .sortByKey(false)
      .take(topN.value)

    // Prints result (top 10) on the console
    moreConciseApproach.foreach {
      case (k, v) => println(s"$k \t ${v.mkString(",")}")
    }
  }
}
