import org.apache.spark.sql.SparkSession

/**
 * Demonstrates four basic Spark RDD operations as small lab exercises:
 * filter/map chaining, flatMap tokenization, reduceByKey aggregation,
 * and explicit hash partitioning inspected via glom().
 *
 * Runs locally (`local[*]`) and prints each result to stdout.
 */
object SparkRDDOperations {
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("Spark RDD Operations")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext

    // Suppress Spark's verbose INFO logging so the printed results are readable.
    sc.setLogLevel("WARN")

    try {
      // Exercise 1: keep values > 50, double them, keep the doubled values > 100.
      // (Doubling anything > 50 already exceeds 100, so the second filter is a no-op
      // kept here to demonstrate transformation chaining.)
      val rdd1 = sc.parallelize(1 to 100)
      val result1 = rdd1.filter(_ > 50).map(_ * 2).filter(_ > 100)
      println("Result 1:")
      result1.collect().foreach(println)

      // Exercise 2: split sentences into lowercase words with flatMap.
      val rdd2 = sc.parallelize(Seq("Hello World", "Spark RDD Example", "Big Data"))
      val result2 = rdd2.flatMap(_.toLowerCase.split(" "))
      println("Result 2:")
      result2.collect().foreach(println)

      // Exercise 3: sum scores per person with reduceByKey.
      val rdd3 = sc.parallelize(Seq(("Alice", 90), ("Bob", 80), ("Charlie", 70), ("Alice", 85), ("Bob", 95)))
      val result3 = rdd3.reduceByKey(_ + _)
      println("Result 3:")
      result3.collect().foreach(println)

      // Exercise 4: distribute data across 2 partitions with a HashPartitioner.
      // partitionBy is only defined on pair RDDs (PairRDDFunctions), so the plain
      // strings must first be keyed — here each word is keyed to its length.
      val rdd4 = sc.parallelize(Seq("apple", "banana", "orange", "pear", "watermelon", "grape", "pineapple"))
      val pairs4 = rdd4.map(word => (word, word.length))
      val result4 = pairs4.partitionBy(new org.apache.spark.HashPartitioner(2))
      println("Result 4:")
      // glom() exposes each partition as an array so the distribution is visible.
      result4.glom().collect().foreach(arr => println(arr.mkString(", ")))
    } finally {
      // Always release the SparkSession, even if an action above fails.
      spark.stop()
    }
  }
}

