package com.offcn.spark.p4

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: BigData-LGW
 * @ClassName: SparkSort
 * @Date: 2020/12/8 21:22
 * @Description: Demonstrates sorting a Spark RDD of students by height.
 * @Version: 1.0
 */
/**
 * Demonstrates three ways of sorting an RDD of [[Student]] records by height:
 *   - `takeOrdered` with an explicit `Ordering` (in [[main]])
 *   - `sortBy` on a field (in [[sb]])
 *   - `sortByKey` after keying by height (in [[sbk]])
 */
object SparkSort {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setAppName("SparkSort")
            .setMaster("local[2]")
        val sc = new SparkContext(conf)
        // Ensure the SparkContext is always released, even if a job fails.
        try {
            val stus = sc.parallelize(List(
                Student("卫超", 19, 187),
                Student("王礼鹏", 29, 177),
                Student("乌欣欣", 18, 168),
                Student("陈延年", 19, 157),
                Student("刘龙沛", 20, 175)
            ))
//            sbk(stus)
//            sb(stus)
            // Take the two students with the smallest height.
            // Ordering.by is the idiomatic replacement for a hand-rolled
            // anonymous Ordering that just compares one field.
            val sorted: Array[Student] =
                stus.takeOrdered(2)(Ordering.by((s: Student) => s.height))
            sorted.foreach(println)
        } finally {
            sc.stop()
        }
    }

    /**
     * Sorts the students by height (ascending) into a single partition with
     * `sortBy`, printing each partition's contents, then printing every record.
     *
     * @param stus the RDD of students to sort
     */
    def sb(stus: RDD[Student]): Unit = {
        val sorted = stus
            .sortBy(stu => stu.height, numPartitions = 1)
            .mapPartitionsWithIndex((index, partition) => {
                val list = partition.toList
                println(s"分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
                // `toIterator` is deprecated since Scala 2.13; `iterator` is equivalent.
                list.iterator
            })
        sorted.foreach(println)
    }

    /**
     * Sorts the students by height (descending) into a single partition with
     * `sortByKey` after keying each record by its height, printing each
     * partition's contents, then printing every (height, student) pair.
     *
     * @param stus the RDD of students to sort
     */
    def sbk(stus: RDD[Student]): Unit = {
        val sorted = stus
            .map(stu => (stu.height, stu))
            .sortByKey(ascending = false, numPartitions = 1)
            .mapPartitionsWithIndex((index, partition) => {
                val list = partition.toList
                println(s"分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
                // `toIterator` is deprecated since Scala 2.13; `iterator` is equivalent.
                list.iterator
            })
        sorted.foreach(println)
    }
}
/**
 * An immutable student record.
 *
 * Marked `final`: case classes should not be extended, since the generated
 * `equals`/`hashCode`/`copy` do not compose correctly under inheritance.
 *
 * @param name   the student's name
 * @param age    the student's age in years
 * @param height the student's height (unit not specified in source — values
 *               suggest centimetres; confirm with callers)
 */
final case class Student(name: String, age: Int, height: Double)
