package org.apache.spark.examples

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates the RDD `sortBy` transformation on a local Spark context:
 * numeric and string sorts, case-class field sorts, multi-field (tuple-key)
 * sorts, custom composite orderings, and controlling the output partition
 * count. All results are printed to stdout.
 *
 * Note: tuple keys sort lexicographically and `Boolean` sorts
 * `false < true` — both facts are relied on in examples 4, 6, and 8.
 */
object RDDSortByOperations {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("RDD SortBy Operations")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    // Ensure the context is stopped even if an example throws.
    try {
      // 1. Basic numeric sorting
      val numbersRDD = sc.parallelize(List(5, 2, 8, 1, 9, 3, 7, 4, 6))
      println("\n=== 基础数字排序 ===")
      println("原始数据：")
      numbersRDD.collect().foreach(x => print(s"$x "))

      println("\n升序排序：")
      val ascNumbers = numbersRDD.sortBy(x => x)
      ascNumbers.collect().foreach(x => print(s"$x "))

      println("\n降序排序：")
      val descNumbers = numbersRDD.sortBy(x => x, ascending = false)
      descNumbers.collect().foreach(x => print(s"$x "))
      println()

      // 2. String sorting
      val wordsRDD = sc.parallelize(List(
        "spark", "hadoop", "hive", "hbase", "flink",
        "kafka", "zookeeper", "storm", "scala"
      ))
      println("\n=== 字符串排序 ===")
      println("原始数据：")
      wordsRDD.collect().foreach(x => print(s"$x "))

      println("\n按字母顺序排序：")
      val sortedWords = wordsRDD.sortBy(identity)
      sortedWords.collect().foreach(x => print(s"$x "))

      println("\n按长度排序：")
      val lengthSortedWords = wordsRDD.sortBy(_.length)
      lengthSortedWords.collect().foreach(x => print(s"$x "))
      println()

      // 3. Sorting custom objects by a field
      case class Student(name: String, age: Int, score: Double)
      val studentsRDD = sc.parallelize(List(
        Student("Alice", 20, 85.5),
        Student("Bob", 22, 77.8),
        Student("Charlie", 19, 92.3),
        Student("David", 21, 88.7),
        Student("Eva", 20, 95.5)
      ))

      println("\n=== 自定义对象排序 ===")
      println("原始数据：")
      studentsRDD.collect().foreach(s =>
        println(f"${s.name}%-8s 年龄:${s.age}%-3d 分数:${s.score}%.1f")
      )

      println("\n按年龄排序：")
      val ageSorted = studentsRDD.sortBy(_.age)
      ageSorted.collect().foreach(s =>
        println(f"${s.name}%-8s 年龄:${s.age}%-3d 分数:${s.score}%.1f")
      )

      println("\n按分数降序排序：")
      val scoreSorted = studentsRDD.sortBy(_.score, ascending = false)
      scoreSorted.collect().foreach(s =>
        println(f"${s.name}%-8s 年龄:${s.age}%-3d 分数:${s.score}%.1f")
      )

      // 4. Multi-field sorting via a tuple key:
      //    age ascending, and within equal ages score descending
      //    (negating the score flips its order under the ascending sort).
      println("\n=== 多字段排序 ===")
      println("按年龄升序，同年龄按分数降序：")
      val multiSorted = studentsRDD.sortBy(s => (s.age, -s.score))
      multiSorted.collect().foreach(s =>
        println(f"${s.name}%-8s 年龄:${s.age}%-3d 分数:${s.score}%.1f")
      )

      // 5. Tuple sorting by a selected element
      val tupleRDD = sc.parallelize(List(
        (3, "c", 2.0), (1, "a", 3.0), (2, "b", 1.0),
        (2, "d", 2.0), (1, "e", 1.0), (3, "f", 3.0)
      ))
      println("\n=== 元组排序 ===")
      println("原始数据：")
      tupleRDD.collect().foreach(t => print(s"${t._1}-${t._2}-${t._3} "))

      println("\n按第一个元素排序：")
      val firstSorted = tupleRDD.sortBy(_._1)
      firstSorted.collect().foreach(t => print(s"${t._1}-${t._2}-${t._3} "))

      println("\n按第三个元素降序排序：")
      val thirdSorted = tupleRDD.sortBy(_._3, ascending = false)
      thirdSorted.collect().foreach(t => print(s"${t._1}-${t._2}-${t._3} "))
      println()

      // 6. Custom ordering rule: group by length parity, odd lengths first.
      //    Odd-length words map to (length % 2 == 0) == false, and
      //    false < true, so they sort ahead of even-length words.
      println("\n=== 自定义排序规则 ===")
      val customSorted = wordsRDD.sortBy(w => (w.length % 2 == 0, w.length))
      println("按字符串长度的奇偶性排序（奇数长度优先）：")
      customSorted.collect().foreach(w => println(s"$w (长度:${w.length})"))

      // 7. Sorting with an explicit partition count
      println("\n=== 指定分区数的排序 ===")
      val partitionedSort = numbersRDD.sortBy(x => x, numPartitions = 4)
      println(s"排序后的分区数: ${partitionedSort.getNumPartitions}")
      println("排序结果：")
      partitionedSort.collect().foreach(x => print(s"$x "))
      println()

      // 8. Conditional sort: evens first in ascending order, then odds in
      //    descending order.
      //    BUG FIX: the key must map evens to `false` (x % 2 != 0) so they
      //    sort first — the previous `x % 2 == 0` put the odds first,
      //    contradicting the printed description. Negating odd values (-x)
      //    makes the ascending tuple sort descend over the odds.
      println("\n=== 条件排序 ===")
      val conditionSorted = numbersRDD.sortBy(x => (x % 2 != 0, if (x % 2 == 0) x else -x))
      println("偶数在前升序，奇数在后降序：")
      conditionSorted.collect().foreach(x => print(s"$x "))
      println()

      // Pause so the Spark UI (http://localhost:4040) can be inspected
      // before the context shuts down. Intentional for this demo only.
      Thread.sleep(300000)
    } finally {
      sc.stop()
    }
  }
}