package org.apache.spark.examples

import org.apache.spark.{SparkConf, SparkContext}

object RDDGroupByOperations {

  /**
   * Demonstrates eight variations of the RDD `groupBy` transformation:
   * grouping integers by parity, range, modulus, compound predicates and a
   * custom classifier; grouping case-class records by derived grade; and
   * grouping strings by length and by first character.
   *
   * Runs on a single local core (`local[1]`) and prints every grouping to
   * stdout. All collected results are sorted by key before printing so the
   * output is deterministic (`groupBy` itself gives no ordering guarantee).
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("RDD GroupBy Operations").setMaster("local[1]")
    val sc = new SparkContext(conf)

    // Sample data: the integers 1 through 20.
    val numbersRDD = sc.parallelize(1 to 20)
    println("\n=== 原始数据 ===")
    numbersRDD.collect().foreach(x => print(s"$x "))
    println()

    // 1. Basic groupBy: partition by parity (even / odd).
    val evenOddGroups = numbersRDD.groupBy(x => if (x % 2 == 0) "偶数" else "奇数")
    println("\n=== 按奇偶分组 ===")
    evenOddGroups.collect().toSeq.sortBy(_._1).foreach { case (key, values) =>
      println(s"$key: ${values.mkString(", ")}")
    }

    // 2. Group by numeric range (buckets of five).
    val rangeGroups = numbersRDD.groupBy(x => {
      if (x <= 5) "1-5"
      else if (x <= 10) "6-10"
      else if (x <= 15) "11-15"
      else "16-20"
    })
    println("\n=== 按范围分组 ===")
    rangeGroups.collect().toSeq.sortBy(_._1).foreach { case (key, values) =>
      println(s"$key: ${values.mkString(", ")}")
    }

    // 3. Group by remainder modulo 3.
    val moduloGroups = numbersRDD.groupBy(_ % 3)
    println("\n=== 按除以3的余数分组 ===")
    moduloGroups.collect().toSeq.sortBy(_._1).foreach { case (remainder, values) =>
      println(s"余数 $remainder: ${values.mkString(", ")}")
    }

    // 4. Build a small student-score dataset.
    case class Student(name: String, score: Int)
    val students = sc.parallelize(Seq(
      Student("Alice", 85), Student("Bob", 92), Student("Charlie", 78),
      Student("David", 95), Student("Eva", 88), Student("Frank", 73),
      Student("Grace", 90), Student("Henry", 82), Student("Ivy", 87)
    ))

    // Group students by letter grade derived from their score.
    val gradeGroups = students.groupBy(student => {
      student.score match {
        case score if score >= 90 => "A"
        case score if score >= 80 => "B"
        case score if score >= 70 => "C"
        case _ => "D"
      }
    })
    println("\n=== 按成绩等级分组 ===")
    gradeGroups.collect().toSeq.sortBy(_._1).foreach { case (grade, studentsInGrade) =>
      println(s"等级 $grade: ${studentsInGrade.toSeq.map(_.name).mkString(", ")}")
    }

    // 5. Group strings by their length.
    val words = sc.parallelize(Seq(
      "apple", "banana", "cherry", "date", "elderberry",
      "fig", "grape", "kiwi", "lemon", "mango"
    ))
    // Pattern variable renamed (wordsOfLength) to avoid shadowing the RDD above.
    val lengthGroups = words.groupBy(_.length)
    println("\n=== 按单词长度分组 ===")
    lengthGroups.collect().toSeq.sortBy(_._1).foreach { case (length, wordsOfLength) =>
      println(s"长度 $length: ${wordsOfLength.mkString(", ")}")
    }

    // 6. Compound-condition grouping: parity combined with a >10 threshold.
    val complexGroups = numbersRDD.groupBy(num => {
      val isEven = num % 2 == 0
      val isGreaterThan10 = num > 10
      (isEven, isGreaterThan10) match {
        case (true, true) => "偶数且大于10"
        case (true, false) => "偶数且小于等于10"
        case (false, true) => "奇数且大于10"
        case (false, false) => "奇数且小于等于10"
      }
    })
    println("\n=== 复合条件分组 ===")
    complexGroups.collect().toSeq.sortBy(_._1).foreach { case (category, numbers) =>
      println(s"$category: ${numbers.mkString(", ")}")
    }

    // 7. Custom classifier: primes, multiples of 4, everything else.
    //    Primality uses trial division up to sqrt(n); the empty range for
    //    n <= 3 yields `true`, and the `n > 1` guard excludes 0 and 1.
    def customGrouping(n: Int): String = {
      val isPrime = n > 1 && (2 to math.sqrt(n).toInt).forall(x => n % x != 0)
      if (isPrime) "质数"
      else if (n % 4 == 0) "4的倍数"
      else "其他"
    }

    val customGroups = numbersRDD.groupBy(customGrouping)
    println("\n=== 自定义分组(质数/4的倍数/其他) ===")
    customGroups.collect().toSeq.sortBy(_._1).foreach { case (category, numbers) =>
      println(s"$category: ${numbers.mkString(", ")}")
    }

    // 8. Group names by their first character.
    val names = sc.parallelize(Seq(
      "Alice", "Bob", "Charlie", "David", "Eva",
      "Frank", "Grace", "Henry", "Ivy", "Jack"
    ))
    // Pattern variable renamed (groupedNames) to avoid shadowing the RDD above.
    val initialGroups = names.groupBy(_.charAt(0))
    println("\n=== 按首字母分组 ===")
    initialGroups.collect().toSeq.sortBy(_._1).foreach { case (initial, groupedNames) =>
      println(s"首字母 $initial: ${groupedNames.mkString(", ")}")
    }

    // NOTE(review): deliberate 5-minute pause so the Spark UI at
    // http://localhost:4040 can be inspected before the context shuts down.
    // Thread.sleep is acceptable only in a demo like this; remove it (or make
    // it opt-in via args) if this code is ever adapted for real use.
    Thread.sleep(300000)

    sc.stop()
  }
}