package com.zt.bigdata.spark.dataalgorithms.chapter01

import java.util

import com.zt.bigdata.template.spark.BasicTemplate
import org.apache.spark.rdd.RDD

/**
  * Secondary sort implemented with Spark's `combineByKey`: records of the
  * form `name,time,value` are grouped per name into a map sorted by time.
  */
class SecondarySortUsingCombineByKey extends
  BasicTemplate[Parameter] {

  /**
    * Reads `name,time,value` records and, per name, accumulates the
    * (time -> value) pairs into a `TreeMap` so they come out ordered by
    * time (the "secondary sort"). Prints both the raw pairs and the
    * combined result, then shuts the session down.
    *
    * @param parameter job configuration; `inputFile` points at the input
    */
  override def process(parameter: Parameter): Unit = {
    val spark = buildSparkSession(parameter)
    // Single partition keeps the demo output deterministic and readable.
    val lines = spark.sparkContext.textFile(parameter.inputFile, 1)

    //------------------------------------------------
    // each input line/record has the following format:
    // name, time, value
    //-------------------------------------------------
    // e.g. "x,2,9" -> ("x", (2, 9))
    val keyedPairs = lines.map { record =>
      val fields = record.split(",")
      (fields(0), (fields(1).toInt, fields(2).toInt))
    }
    keyedPairs.collect().foreach { case (name, (time, value)) =>
      println(s"$name,$time,$value")
    }

    val combined: RDD[(String, util.SortedMap[Int, Int])] =
      keyedPairs.combineByKey[util.SortedMap[Int, Int]](
        // createCombiner: seed a sorted map from the first pair seen for a key
        (pair: (Int, Int)) => {
          val seeded = new util.TreeMap[Int, Int]
          seeded.put(pair._1, pair._2)
          seeded
        },
        // mergeValue: fold one more pair into a per-partition accumulator
        // (in-place mutation is safe here — Spark owns the combiner)
        (acc: util.SortedMap[Int, Int], pair: (Int, Int)) => {
          acc.put(pair._1, pair._2)
          acc
        },
        // mergeCombiners: always merge the smaller map into the larger one
        (left: util.SortedMap[Int, Int], right: util.SortedMap[Int, Int]) =>
          if (left.size() < right.size()) DataStructures.merge(left, right)
          else DataStructures.merge(right, left)
      )

    combined.collect().foreach { case (name, sortedMap) =>
      println(s"name=$name , map=$sortedMap")
    }

    spark.stop()
  }
}
