package com.zt.bigdata.spark.dataalgorithms.chapter03


import com.zt.bigdata.template.spark.BasicTemplate

import scala.collection.SortedMap

/**
  * Finds the top-N records by numeric value from CSV lines ("name,value,...",
  * second field is the ranking key), demonstrating two strategies: a bounded
  * per-partition SortedMap whose partial winners are merged on the driver,
  * and a more concise groupByKey + sortByKey + take pipeline.
  */
class Top10 extends BasicTemplate[Parameter] {

  /**
    * Reads "name,value[,...]" CSV lines and prints the records carrying the
    * `parameter.topN` largest numeric values, using two independent strategies:
    *
    *  1. a bounded per-partition SortedMap whose partial results are merged
    *     on the driver (only partition winners are collected), and
    *  2. a groupByKey + sortByKey(descending) + take pipeline.
    *
    * Names sharing the same value are kept together (arrays concatenated)
    * rather than overwriting one another, so both strategies agree.
    *
    * @param parameter job configuration (input path, topN, Spark settings)
    */
  override def process(parameter: Parameter): Unit = {
    val inputFile = parameter.inputFile
    val spark = buildSparkSession(parameter)
    val input = spark.sparkContext.textFile(inputFile)

    //------------------------------------------------
    // Each input line has the format: name,value[,...]
    // e.g. "x,2,9" -> (2, Array("x")): the second field is the ranking key.
    // NOTE(review): assumes the second field always parses as Int — a
    // malformed line will fail the job; confirm upstream data is clean.
    //------------------------------------------------
    val valueToKey = input.map { line =>
      val fields = line.split(",")
      (fields(1).toInt, Array(fields(0)))
    }

    // Approach 1: keep at most topN entries per partition, keyed by value.
    val partitions = valueToKey.mapPartitions { iter =>
      var localTopN = SortedMap.empty[Int, Array[String]]
      iter.foreach { case (value, names) =>
        // Concatenate on key collision instead of overwriting, so every
        // record sharing a value is retained (fixes silent name loss the
        // naive `localTopN += kv` caused for duplicate values).
        localTopN += (value -> localTopN.get(value).fold(names)(_ ++ names))
        if (localTopN.size > parameter.topN)
          localTopN = localTopN.takeRight(parameter.topN) // keep N largest keys
      }
      localTopN.iterator
    }

    // Merge the per-partition winners on the driver, again concatenating on
    // collisions, then keep the global N largest keys. foldLeft over an
    // immutable SortedMap preserves the element type, so no asInstanceOf
    // cast is needed when printing (the old `++:` widened the static type).
    val finalTopN = partitions
      .collect()
      .foldLeft(SortedMap.empty[Int, Array[String]]) { case (acc, (value, names)) =>
        acc.updated(value, acc.get(value).fold(names)(_ ++ names))
      }
      .takeRight(parameter.topN)

    println("Top10")
    finalTopN.foreach {
      case (k, v) => println(s"$k \t ${v.mkString(",")}")
    }

    // Approach 2: group every name by its value, sort descending, take N.
    // Simpler, but shuffles all records instead of only partition winners.
    val moreConciseApproach = valueToKey
      .groupByKey()
      .sortByKey(ascending = false)
      .take(parameter.topN)
    println("moreConciseApproach")
    moreConciseApproach.foreach {
      case (k, v) => println(s"$k \t ${v.flatten.mkString(",")}")
    }
  }
}
