package org.apache.spark.examples

import org.apache.spark.{SparkConf, SparkContext}
import scala.util.Random

/**
 * Demonstrates `reduceByKey` on Spark RDDs: basic aggregation, custom value
 * types, numeric reductions, partitioning, chaining, and a performance
 * comparison against `groupByKey`. Runs on a local 4-core master and pauses
 * at the end so the Spark UI can be inspected.
 *
 * Usage: optional first CLI argument overrides the final pause duration in
 * milliseconds (default: 300000 = 5 minutes).
 */
object RDDReduceByKeyOperations {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("RDD ReduceByKey Operations")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    // 1. Basic reduceByKey example
    val salesRDD = sc.parallelize(List(
      ("apple", 5.0), ("banana", 3.0), ("apple", 4.0),
      ("orange", 2.5), ("banana", 4.0), ("apple", 6.0)
    ))
    println("\n=== 基础reduceByKey示例 ===")
    println("原始销售数据：")
    salesRDD.collect().foreach(println)

    // Total revenue per fruit: values with the same key are summed,
    // with map-side combining before the shuffle.
    val totalSales = salesRDD.reduceByKey(_ + _)
    println("\n每种水果的总销售额：")
    totalSales.collect().foreach { case (fruit, total) =>
      println(f"$fruit: $total%.2f")
    }

    // 2. reduceByKey over a custom value type.
    // NOTE: the reduce function must be associative and commutative;
    // `merge` (component-wise addition) satisfies both.
    case class SaleRecord(revenue: Double, quantity: Int) {
      def merge(other: SaleRecord): SaleRecord = {
        SaleRecord(revenue + other.revenue, quantity + other.quantity)
      }
    }

    val complexSalesRDD = sc.parallelize(List(
      ("apple", SaleRecord(5.0, 2)),
      ("banana", SaleRecord(3.0, 3)),
      ("apple", SaleRecord(4.0, 1)),
      ("orange", SaleRecord(2.5, 2)),
      ("banana", SaleRecord(4.0, 4)),
      ("apple", SaleRecord(6.0, 3))
    ))

    println("\n=== 复杂对象的reduceByKey ===")
    println("原始复杂销售数据：")
    complexSalesRDD.collect().foreach { case (fruit, record) =>
      println(f"$fruit: 收入=${record.revenue}%.2f, 数量=${record.quantity}")
    }

    val reducedComplexSales = complexSalesRDD.reduceByKey((a, b) => a.merge(b))
    println("\n合并后的销售数据：")
    reducedComplexSales.collect().foreach { case (fruit, record) =>
      println(f"$fruit: 总收入=${record.revenue}%.2f, 总数量=${record.quantity}")
    }

    // 3. Numeric reductions with reduceByKey
    val numbersRDD = sc.parallelize(List(
      ("even", 2), ("odd", 3), ("even", 4),
      ("odd", 5), ("even", 6), ("odd", 7)
    ))

    println("\n=== 数值计算的reduceByKey ===")

    // 3.1 Sum per key
    val sums = numbersRDD.reduceByKey(_ + _)
    println("按类别求和：")
    sums.collect().foreach(println)

    // 3.2 Maximum per key
    val maxValues = numbersRDD.reduceByKey(math.max)
    println("\n按类别求最大值：")
    maxValues.collect().foreach(println)

    // 3.3 Minimum per key
    val minValues = numbersRDD.reduceByKey(math.min)
    println("\n按类别求最小值：")
    minValues.collect().foreach(println)

    // 4. Large-volume performance test
    println("\n=== 大数据量性能测试 ===")
    val products = Array("apple", "banana", "orange", "grape", "watermelon")
    // FIX: the original captured a single `new Random(42)` in the map closure.
    // That instance is serialized into every task, so on a cluster each
    // partition deserializes the SAME seeded state and generates identical
    // data (and locally it is contended across threads). Seed one Random per
    // partition instead — deterministic, but distinct across partitions.
    val largeSalesRDD = sc.parallelize(1 to 1000000)
      .mapPartitionsWithIndex { (partIdx, iter) =>
        val rng = new Random(42L + partIdx)
        iter.map { _ =>
          val product = products(rng.nextInt(products.length))
          val amount = rng.nextDouble() * 100
          (product, amount)
        }
      }
      .cache() // reused by section 8's timing; without caching it is regenerated

    println("大数据量reduceByKey前的分区数：" + largeSalesRDD.getNumPartitions)
    val start = System.currentTimeMillis()
    val reducedLargeSales = largeSalesRDD.reduceByKey(_ + _)
    // collect() is the action that triggers the job (and populates the cache).
    val totalsByProduct = reducedLargeSales.collect()
    val end = System.currentTimeMillis()

    println(s"\n处理耗时: ${end - start} ms")
    println("各产品总销售额：")
    totalsByProduct.foreach { case (product, total) =>
      println(f"$product: $total%.2f")
    }

    // 5. reduceByKey with an explicit partition count
    println("\n=== 自定义分区数的reduceByKey ===")
    val customPartitioned = salesRDD.reduceByKey(_ + _, 2)
    println(s"自定义分区后的分区数: ${customPartitioned.getNumPartitions}")

    // 6. Chained operations
    println("\n=== 链式操作 ===")
    // FIX: the original formatted totals to strings BEFORE sorting, so the
    // sort was lexicographic ("7.00" > "15.00") and the order was wrong.
    // Sort on the numeric total first, then format for display.
    val result = salesRDD
      .reduceByKey(_ + _)                 // sum per fruit
      .sortBy(_._2, ascending = false)    // numeric sort, descending
      .mapValues(total => f"$total%.2f")  // format only for output

    println("按销售额降序排序的结果：")
    result.collect().foreach(println)

    // 7. Conditional reduceByKey
    println("\n=== 条件reduceByKey ===")
    // Filter BEFORE reducing so discarded records never enter the shuffle.
    val filteredSales = salesRDD
      .filter(_._2 > 3.0)  // keep only sales above 3
      .reduceByKey(_ + _)  // then aggregate
    
    println("销售额大于3的合并结果：")
    filteredSales.collect().foreach { case (product, total) =>
      println(f"$product: $total%.2f")
    }

    // 8. reduceByKey vs groupByKey performance comparison.
    // largeSalesRDD is cached (section 4), so both timings measure the
    // aggregation/shuffle itself rather than data regeneration.
    println("\n=== reduceByKey vs groupByKey性能对比 ===")

    // reduceByKey: combines map-side, shuffling one partial sum per key
    val startReduce = System.currentTimeMillis()
    val reduceResult = largeSalesRDD.reduceByKey(_ + _).count()
    val endReduce = System.currentTimeMillis()
    println(s"reduceByKey耗时: ${endReduce - startReduce} ms")

    // groupByKey: shuffles every record, then sums — expected to be slower
    val startGroup = System.currentTimeMillis()
    val groupResult = largeSalesRDD.groupByKey().mapValues(_.sum).count()
    val endGroup = System.currentTimeMillis()
    println(s"groupByKey耗时: ${endGroup - startGroup} ms")
    // Sanity check: both strategies must see the same set of distinct keys.
    assert(reduceResult == groupResult, "key counts differ between strategies")

    // Pause so the Spark UI (http://localhost:4040) can be inspected.
    // An optional first CLI arg overrides the pause in ms (default 5 min).
    val pauseMs = args.headOption
      .flatMap(a => scala.util.Try(a.toLong).toOption)
      .getOrElse(300000L)
    println("\n程序将暂停5分钟，请在此期间查看Spark UI...")
    Thread.sleep(pauseMs)

    sc.stop()
  }
}