package com.zt.bigdata.spark.dataalgorithms.chapter01

import com.zt.bigdata.template.spark.BasicTemplate

/**
  * Demonstrates a "secondary sort" on CSV records of the form
  * `name,time,value` using Spark's groupByKey: rows are grouped by
  * name and each group's (time, value) pairs are sorted by time.
  */
class SecondarySortUsingGroupByKey extends
  BasicTemplate[Parameter] {

  /**
    * Groups `name,time,value` records by name and sorts each group's
    * (time, value) pairs by time (ascending), printing the data set
    * at every stage for inspection.
    *
    * NOTE(review): groupByKey materialises all values of a key
    * together — acceptable for a demo, risky for large/skewed keys.
    *
    * @param parameter carries the input-file path and the settings
    *                  used by buildSparkSession
    */
  override def process(parameter: Parameter): Unit = {
    val spark = buildSparkSession(parameter)
    val lines = spark.sparkContext.textFile(parameter.inputFile)

    //------------------------------------------------
    // each input line/record has the following format:
    // name, time, value
    //-------------------------------------------------
    // Parse "x,2,9" into ("x", (2, 9)).
    // Assumes well-formed lines — malformed input will throw. TODO confirm upstream guarantees.
    val keyedPairs = lines.map { record =>
      val fields = record.split(",")
      (fields(0), (fields(1).toInt, fields(2).toInt))
    }

    // Debug dump of the parsed (name, (time, value)) records.
    keyedPairs.collect().foreach {
      case (name, (time, value)) => println(s"$name,$time,$value")
    }

    // Collect every (time, value) pair under its name.
    val grouped = keyedPairs.groupByKey()

    grouped.collect().foreach {
      case (name, pairs) =>
        println(name)
        pairs.foreach { case (time, value) => println(s"$time,$value") }
    }

    // Secondary sort: order each group's pairs by time, ascending.
    val sortedGroups = grouped.mapValues(_.toList.sortBy(_._1))

    sortedGroups.collect().foreach {
      case (name, pairs) =>
        println(name)
        pairs.foreach { case (time, value) => println(s"$time,$value") }
    }

    spark.stop()
  }
}
