package hello
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.log4j.{Level, Logger}
/**
 * Demo of Spark's `sortByKey` transformation: sorting person records by height, descending.
 *
 * Each input record is a CSV string: "id,name,age,height".
 */
object sortByKey {

  /** Builds a local SparkContext, runs the demo, and always shuts the context down. */
  def main(args: Array[String]): Unit = {
    // BUGFIX: the app name previously referenced `_02SparkTransformationOps`, a class
    // not defined in this file (copy-paste from another lesson); use this object's own class.
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName(sortByKey.getClass.getSimpleName)
    // Silence Spark's verbose INFO logging so the demo output stays readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    val sc = new SparkContext(conf)
    try {
      transformationOps9(sc)
    } finally {
      sc.stop() // release the context even if the job above throws
    }
  }

  /**
   * Sorts the sample records by height in descending order using `sortByKey`
   * and prints each (height, record) pair.
   *
   * @param sc the SparkContext used to parallelize the sample data
   */
  def transformationOps9(sc: SparkContext): Unit = {
    val list = List(
      "1,李  磊,22,175",
      "2,刘银鹏,23,175",
      "3,齐彦鹏,22,180",
      "4,杨  柳,22,168",
      "5,敦  鹏,20,175"
    )
    val listRDD: RDD[String] = sc.parallelize(list)

    // Key each record by its height. BUGFIX: parse the height to Int — sorting the raw
    // String key is lexicographic, which misorders variable-width values (e.g. "99" > "180").
    val heightRDD: RDD[(Int, String)] = listRDD.map { line =>
      val fields = line.split(",")
      (fields(3).trim.toInt, line)
    }
    // numPartitions = 1 forces a single partition so the result is globally sorted;
    // with multiple partitions, sortByKey only guarantees order within each partition.
    val retRDD: RDD[(Int, String)] = heightRDD.sortByKey(ascending = false, numPartitions = 1)
    retRDD.foreach(println)
    // To get a secondary sort (sortBy-style) with sortByKey: wrap each record in a custom
    // class implementing Ordered/Comparable (override compareTo), use that object as the
    // key in the map step, and let the value be null.
  }

}