package com.spark.mooc.ch5_rdd.part04_cases

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description:
 *              任务要求：
 *                  对于一个给定的文件（数据如file1.txt所示），请对数据进行排序，首先根据第1列数据降序排序，
 *                  如果第1列数据相等，则根据第2列数据降序排序
 *              思路：
 *                  1.按照Ordered和Serializable接口实现自定义排序的key
 *                  2.将要进行二次排序的文件加载进来生成<key,value>类型的RDD
 *                  3.使用sortByKey基于自定义的key进行二次排序
 *                  4.去掉排序的key只保留排序的结果
 * @time: 2020/11/26 23:50
 * @author: lhy
 */
/**
 * Composite key for secondary sorting: orders by `first`, and falls back to
 * `second` only when the first components are equal.
 *
 * Extends [[Serializable]] because Spark ships key instances across the
 * cluster during the shuffle performed by `sortByKey`.
 *
 * @param first  primary sort component (column 1 of the input line)
 * @param second secondary sort component (column 2 of the input line)
 */
class SecondarySortKey(val first: Int, val second: Int) extends Ordered[SecondarySortKey] with Serializable {
    override def compare(that: SecondarySortKey): Int = {
        // Use Integer.compare rather than subtraction: `this.first - that.first`
        // overflows Int for operands of opposite sign (e.g. Int.MinValue vs 1),
        // which silently yields the wrong sign and corrupts the ordering.
        val byFirst = Integer.compare(this.first, that.first)
        if (byFirst != 0) byFirst
        else Integer.compare(this.second, that.second)
    }
}
object SecondarySortApp {
    /**
     * Entry point: reads whitespace-separated integer pairs from a text file,
     * sorts the lines descending by the first column (ties broken by the
     * second column, also descending), and prints the original lines in
     * sorted order.
     *
     * Pipeline: build a (SecondarySortKey, line) pair per input line, sort by
     * the composite key with `sortByKey(ascending = false)`, then drop the
     * key so only the raw lines remain.
     */
    def main(args: Array[String]): Unit = {
        val conf: SparkConf = new SparkConf().setAppName("SecondarySortApp").setMaster("local")
        val sc = new SparkContext(conf)
        // A single partition keeps the collected output in one global order.
        val lines: RDD[String] = sc.textFile("input/rdd/secondarySort/file1.txt", 1)
        // Split each line once and reuse the columns for the composite key;
        // the original, unmodified line rides along as the value.
        val keyed: RDD[(SecondarySortKey, String)] = lines.map { line =>
            val cols = line.split(" ")
            (new SecondarySortKey(cols(0).toInt, cols(1).toInt), line)
        }
        val sortedLines: Array[String] = keyed.sortByKey(ascending = false).values.collect()
        sortedLines.foreach(println)
    }
}

