package com.offcn.bigdata.spark.p1.p3

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.reflect.ClassTag

/**
  * Basic sorting operations in Spark:
  *     sortByKey   - sorts a pair RDD by its keys
  *     sortBy      - sorts an RDD by a derived key
  *     takeOrdered - listed for completeness; not demonstrated below
  */
object _06SortOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[*]")
            .setAppName(s"${_06SortOps.getClass.getSimpleName}")
        val sc = new SparkContext(conf)

        // Immutable record describing one student.
        case class Student(id: Int, name: String, age: Int, height: Double)

        val list = List(
            Student(1, "林博", 18, 180),
            Student(2, "单松", 19, 150),
            Student(3, "张皓", 20, 120),
            Student(4, "王建", 20, 119),
            Student(106, "冯岩", 30, 10086)
        )
        val rdd = sc.parallelize(list)

        // Sort students by age (descending) via sortByKey on an (age, student) pair RDD.
        // numPartitions = 1 keeps a single partition so the printed order is the global order.
        val age2Stu: RDD[(Int, Student)] = rdd.map(stu => (stu.age, stu))
        age2Stu.sortByKey(ascending = false, numPartitions = 1).foreach(println)

        println("------------------排序操作------------------------")

        // sortBy takes the key extractor plus an (implicit) Ordering[K] and ClassTag[K].
        // The stdlib already provides both for Int: Ordering.Int and ClassTag.Int.
        // (The original hand-rolled an anonymous Ordering and cast ClassTag.Int to
        // ClassTag[Int] — ClassTag.Int IS a ClassTag[Int], so the cast was redundant.)
        rdd.sortBy(stu => stu.age, ascending = true, numPartitions = 1)(
            Ordering.Int,
            ClassTag.Int
        ).foreach(println)

        sc.stop()
    }
}