package com.offcn.bigdata.spark.p3

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Group-and-sort example: for each course, sort its score records
  * descending and print the top 3.
  */
object _09GroupSortOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[1]")
            // getSimpleName on a Scala object returns "_09GroupSortOps$";
            // strip the compiler-generated "$" suffix for a clean app name.
            .setAppName(_09GroupSortOps.getClass.getSimpleName.stripSuffix("$"))
        val sc = new SparkContext(conf)

        val lines = sc.textFile("file:/E:/work/2020-0828期大数据/workspace/spark-parent-0828/data/topn.txt")

        // Each input line looks like "<course> <name> <score>".
        // Split on the FIRST space only: the course is the key, the
        // remainder ("<name> <score>") is kept as an opaque value string.
        val course2Info: RDD[(String, String)] = lines.map { line =>
            val sep = line.indexOf(" ") // compute the split point once
            (line.substring(0, sep), line.substring(sep + 1))
        }
        // Group every "<name> <score>" record under its course key.
        val course2Infos: RDD[(String, Iterable[String])] = course2Info.groupByKey()
        println("-----------------------分组之后的内容-----------------------------")
        course2Infos.foreach { case (course, infos) =>
            println(s"course: ${course}, info: ${infos.mkString("[", ", ", "]")}")
        }
        println("------------------------分组排序---------------------")
        // Per course: parse each record ONCE into (name, score), sort by
        // score descending, and keep the top 3.
        course2Infos.map { case (course, infos) =>
            val top3 = infos
                .map { record =>
                    val fields = record.split("\\s+") // split once, reuse both fields
                    (fields(0), fields(1).toDouble)
                }
                .toList
                // descending by score; Ordering[Double].reverse matches the
                // original hand-rolled comparator (y._2.compareTo(x._2))
                .sortBy(_._2)(Ordering[Double].reverse)
                .take(3)
            (course, top3)
        }.foreach { case (course, infos) =>
            println(s"course: ${course}, info: ${infos.mkString("[", ", ", "]")}")
        }

        sc.stop()
    }
}
