package org.apache.spark.examples

import org.apache.spark.{SparkConf, SparkContext}

object RDDKeyValueOperations {

  /**
   * Demonstrates Spark pair-RDD (key-value) operations end to end:
   * creation, grouping, per-key aggregation, joins, `combineByKey`,
   * explicit partitioning, and sorting.
   *
   * Runs against a local 4-thread master and prints every result to stdout.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("RDD Key-Value Operations")
      .setMaster("local[4]") // example-only: hard-coded local master with 4 threads
    val sc = new SparkContext(conf)

    // Guarantee the SparkContext is released even if a demo stage throws.
    try {
      // 1. Different ways of creating a pair (key-value) RDD
      println("\n=== 创建键值对RDD ===")

      // 1.1 Directly from a list of tuples
      val pairRDD1 = sc.parallelize(List(
        ("apple", 3), ("banana", 2), ("apple", 5), ("orange", 4)
      ))
      println("方式1 - 直接创建元组：")
      pairRDD1.collect().foreach(println)

      // 1.2 Via a map transformation (classic word-count shape: word -> 1)
      val words = sc.parallelize(List("apple", "banana", "apple", "orange"))
      val pairRDD2 = words.map(word => (word, 1))
      println("\n方式2 - 通过map转换：")
      pairRDD2.collect().foreach(println)

      // 1.3 By projecting fields out of a case class
      case class Product(name: String, price: Double)
      val products = sc.parallelize(List(
        Product("apple", 2.5), Product("banana", 1.5),
        Product("orange", 3.0)
      ))
      val pairRDD3 = products.map(p => (p.name, p.price))
      println("\n方式3 - 使用case class转换：")
      pairRDD3.collect().foreach(println)

      // 2. Basic pair-RDD transformations
      println("\n=== 基本转换操作 ===")

      // 2.1 Group all values per key. Note: groupByKey shuffles every value;
      // prefer reduceByKey/aggregateByKey when you only need an aggregate.
      val groupedByKey = pairRDD1.groupByKey()
      println("groupByKey结果：")
      groupedByKey.collect().foreach { case (k, v) =>
        println(s"$k: ${v.mkString(",")}")
      }

      // 2.2 Reduce values per key (combines map-side before the shuffle)
      val reducedByKey = pairRDD1.reduceByKey(_ + _)
      println("\nreduceByKey结果：")
      reducedByKey.collect().foreach(println)

      // 2.3 aggregateByKey with zero value 0 — here equivalent to reduceByKey,
      // but it allows the result type to differ from the value type.
      val aggregated = pairRDD1.aggregateByKey(0)(
        (acc, value) => acc + value,  // within-partition aggregation
        (acc1, acc2) => acc1 + acc2   // cross-partition aggregation
      )
      println("\naggregateByKey结果：")
      aggregated.collect().foreach(println)

      // 3. Joining two pair RDDs
      println("\n=== 复杂键值对操作 ===")

      // 3.1 orders: orderId -> (product, quantity); prices: product -> unit price
      val orders = sc.parallelize(List(
        (1, ("apple", 5)),
        (2, ("banana", 3)),
        (3, ("apple", 2)),
        (4, ("orange", 4))
      ))

      val prices = sc.parallelize(List(
        ("apple", 2.5),
        ("banana", 1.5),
        ("orange", 3.0)
      ))

      // 3.2 Re-key orders by product so they share a key with `prices`,
      // then join: product -> ((orderId, quantity), unitPrice)
      val joined = orders.map { case (id, (product, quantity)) =>
        (product, (id, quantity))
      }.join(prices)

      println("Join结果（订单和价格）：")
      joined.collect().foreach { case (product, ((id, quantity), price)) =>
        println(f"订单$id: $product x $quantity = ${quantity * price}%.2f")
      }

      // 4. Advanced operations
      println("\n=== 高级操作 ===")

      // 4.1 combineByKey: build a per-key (sum, count) pair to compute averages
      val sumCount = pairRDD1.combineByKey(
        (value) => (value, 1),                          // create combiner from first value
        (acc: (Int, Int), value) => (acc._1 + value, acc._2 + 1),    // merge a value within a partition
        (acc1: (Int, Int), acc2: (Int, Int)) => (acc1._1 + acc2._1, acc1._2 + acc2._2) // merge combiners across partitions
      )

      println("平均值计算结果：")
      sumCount.mapValues { case (sum, count) =>
        sum.toDouble / count
      }.collect().foreach(println)

      // 4.2 foldByKey with zero element 0 (like reduceByKey, plus a neutral value)
      val folded = pairRDD1.foldByKey(0)(_ + _)
      println("\nfoldByKey结果：")
      folded.collect().foreach(println)

      // 5. Partitioning
      println("\n=== 分区操作 ===")

      // 5.1 Explicit hash partitioning into 2 partitions
      val customPartitioned = pairRDD1.partitionBy(new org.apache.spark.HashPartitioner(2))
      println(s"自定义分区数: ${customPartitioned.getNumPartitions}")

      // 5.2 mapValues / flatMapValues transform only the value side,
      // leaving the key (and any partitioner) intact.
      println("\nmapValues示例（价格加倍）：")
      pairRDD3.mapValues(_ * 2).collect().foreach(println)

      println("\nflatMapValues示例（生成价格区间）：")
      pairRDD3.flatMapValues(price => List(price - 0.5, price, price + 0.5))
        .collect().foreach(println)

      // 6. Sorting key-value RDDs
      println("\n=== 键值对排序 ===")

      // 6.1 Sort by key (ascending by default)
      println("按键排序：")
      pairRDD1.sortByKey().collect().foreach(println)

      // 6.2 Sort by value, descending
      println("\n按值排序：")
      pairRDD1.sortBy(_._2, ascending = false).collect().foreach(println)

      // Pause so the Spark UI (http://localhost:4040) can be inspected.
      // NOTE(review): hard-coded 5-minute sleep — fine for an interactive
      // demo, but remove it for any automated/non-interactive run.
      Thread.sleep(300000)
    } finally {
      // Always release Spark resources, even if a stage above failed.
      sc.stop()
    }
  }
}