package com.zt.bigdata.spark.dataalgorithms.chapter01

import com.zt.bigdata.template.spark.BasicTemplate
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD

/**
  * Secondary sort implemented with Spark's
  * `repartitionAndSortWithinPartitions`: records are mapped to a composite
  * key, partitioned by a [[CustomPartitioner]], and sorted within each
  * partition during the shuffle itself (one pass, no separate sortByKey).
  */
class SecondarySortUsingRepartitionAndSortWithinPartitions extends BasicTemplate[Parameter] {

  /**
    * Reads the input file, performs a secondary sort with a single shuffle
    * via `repartitionAndSortWithinPartitions`, and prints the results from
    * the executors. Stops the SparkSession when done.
    *
    * @param parameter job parameters: `inputFile` to read and `partitions`
    *                  for the custom partitioner
    */
  override def process(parameter: Parameter): Unit = {

    val inputFile = parameter.inputFile

    val spark = buildSparkSession(parameter)
    val input = spark.sparkContext.textFile(inputFile)

    //------------------------------------------------
    // Each input line/record has the following format:
    //   <year><,><month><,><day><,><value>
    // e.g. "2012,01,01,52" -> (("2012-01", 1), 52)
    // The composite key is (year-month, day); the value (52)
    // plays no part in the sort order.
    //-------------------------------------------------
    val valueToKey = input.map { x =>
      val line = x.split(",")
      // BUG FIX: the secondary key component must be the day (line(2)),
      // not the value (line(3)) — otherwise the sort order would depend
      // on the value, contradicting the format comment above.
      ((line(0) + "-" + line(1), line(2).toInt), line(3).toInt)
    }

    // Descending ordering on the composite key: primary on the "yyyy-MM"
    // string, secondary on the day. Picked up implicitly by
    // repartitionAndSortWithinPartitions (via OrderedRDDFunctions).
    implicit def tupleOrderingDesc: Ordering[(String, Int)] = new Ordering[(String, Int)] {
      override def compare(o1: (String, Int), o2: (String, Int)): Int = {
        if (o2._1.compare(o1._1) == 0) o2._2.compare(o1._2)
        else o2._1.compare(o1._1)
      }
    }

    // One shuffle: partition by the custom partitioner and sort each
    // partition by the composite key during the shuffle itself — cheaper
    // than repartition followed by a separate sort.
    val sorted: RDD[((String, Int), Int)] =
      valueToKey.repartitionAndSortWithinPartitions(new CustomPartitioner(parameter.partitions))

    // Drop the secondary-sort component; keep (year-month, value).
    val result = sorted.map { case ((yearMonth, _), value) => (yearMonth, value) }

    // Print on the executors, partition by partition, instead of
    // collecting the whole result onto the driver.
    result.foreachPartition { iter =>
      iter.foreach { case (yearMonth, value) => println(s"$yearMonth : $value") }
    }

    // done
    spark.stop()
  }
}
