package org.apache.spark.examples

import org.apache.spark.{SparkConf, SparkContext}
import scala.util.Random

/**
 * Demonstrates Spark RDD `distinct` across eight scenarios: primitive values,
 * strings, case classes, tuples, a large generated dataset (with and without an
 * explicit partition count), composite values, and filter-then-distinct.
 *
 * Runs on a local master with 4 threads. Pass `--pause-for-ui` to keep the
 * driver alive for 5 minutes so the Spark UI (http://localhost:4040) can be
 * inspected after the demos finish.
 */
object RDDDistinctOperations {

  /**
   * Value type used for the custom-object demo. Declared at object level
   * (not inside `main`) so Spark task serialization does not have to drag
   * method-local scope into the closure — a known pitfall with local case
   * classes in Spark jobs.
   */
  final case class Person(name: String, age: Int)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("RDD Distinct Operations")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    try {
      basicNumberDistinct(sc)
      stringDistinct(sc)
      customObjectDistinct(sc)
      tupleDistinct(sc)
      largeScaleDistinct(sc)
      complexDataDistinct(sc)
      conditionalDistinct(sc)

      // Originally an unconditional Thread.sleep(300000) that stalled every
      // run for 5 minutes; pausing for the Spark UI is now opt-in.
      if (args.contains("--pause-for-ui")) Thread.sleep(300000)
    } finally {
      // Always release Spark resources, even if one of the demos throws.
      sc.stop()
    }
  }

  /** 1. Basic numeric deduplication: distinct over an Int list full of duplicates. */
  private def basicNumberDistinct(sc: SparkContext): Unit = {
    val numbersRDD = sc.parallelize(List(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5))
    println("\n=== 基础数字去重 ===")
    println("原始数据：")
    numbersRDD.collect().foreach(x => print(s"$x "))
    println("\n去重后：")
    // Sort on the driver after collect(): distinct() gives no ordering guarantee.
    numbersRDD.distinct().collect().sorted.foreach(x => print(s"$x "))
    println()
  }

  /** 2. String deduplication. */
  private def stringDistinct(sc: SparkContext): Unit = {
    val wordsRDD = sc.parallelize(List(
      "spark", "hadoop", "spark", "hive", "hadoop", "spark",
      "hbase", "hive", "spark", "hadoop"
    ))
    println("\n=== 字符串去重 ===")
    println("原始数据：")
    wordsRDD.collect().foreach(x => print(s"$x "))
    println("\n去重后：")
    wordsRDD.distinct().collect().sorted.foreach(x => print(s"$x "))
    println()
  }

  /** 3. Deduplication of custom objects — relies on case-class equals/hashCode. */
  private def customObjectDistinct(sc: SparkContext): Unit = {
    val peopleRDD = sc.parallelize(List(
      Person("Alice", 25), Person("Bob", 30),
      Person("Alice", 25), Person("Charlie", 35),
      Person("Bob", 30), Person("David", 40),
      Person("Alice", 25)
    ))
    println("\n=== 自定义对象去重 ===")
    println("原始数据：")
    peopleRDD.collect().foreach(p => println(s"${p.name}(${p.age})"))
    println("去重后：")
    peopleRDD.distinct().collect().sortBy(_.name).foreach(p => println(s"${p.name}(${p.age})"))
  }

  /** 4. Tuple deduplication — tuples compare structurally, so distinct() just works. */
  private def tupleDistinct(sc: SparkContext): Unit = {
    val tupleRDD = sc.parallelize(List(
      (1, "a"), (2, "b"), (1, "a"), (3, "c"),
      (2, "b"), (4, "d"), (1, "a"), (2, "b")
    ))
    println("\n=== 元组去重 ===")
    println("原始数据：")
    tupleRDD.collect().foreach(t => print(s"${t._1}-${t._2} "))
    println("\n去重后：")
    tupleRDD.distinct().collect().sortBy(_._1).foreach(t => print(s"${t._1}-${t._2} "))
    println()
  }

  /**
   * 5 & 6. Large-dataset distinct, then distinct with an explicit partition count.
   *
   * The original code captured a single `new Random(42)` in the `map` closure.
   * Spark serializes one copy of that Random into every task, so each of the
   * 4 partitions replayed the *identical* pseudo-random sequence — the data was
   * far less random than intended. Seeding per partition via
   * `mapPartitionsWithIndex` keeps the run deterministic while making the
   * partitions genuinely differ.
   */
  private def largeScaleDistinct(sc: SparkContext): Unit = {
    val largeRDD = sc.parallelize(1 to 1000000).mapPartitionsWithIndex { (partIdx, iter) =>
      val rnd = new Random(42L + partIdx) // deterministic, but distinct per partition
      iter.map(_ => rnd.nextInt(10000))
    }
    println("\n=== 大数据量去重性能测试 ===")
    println(s"原始数据量: ${largeRDD.count()}")
    val distinctLarge = largeRDD.distinct()
    println(s"去重后数据量: ${distinctLarge.count()}")

    println("\n=== 指定分区数的去重 ===")
    val distinctWithPartitions = largeRDD.distinct(8) // request 8 output partitions
    println(s"去重后分区数: ${distinctWithPartitions.getNumPartitions}")
    println(s"去重后数据量: ${distinctWithPartitions.count()}")
  }

  /** 7. Composite-value deduplication: (Int, Set[String]) pairs compare structurally. */
  private def complexDataDistinct(sc: SparkContext): Unit = {
    val complexRDD = sc.parallelize(List(
      (1, Set("a", "b")), (2, Set("b", "c")),
      (1, Set("a", "b")), (3, Set("c", "d")),
      (2, Set("b", "c")), (4, Set("d", "e")),
      (1, Set("a", "b"))
    ))
    println("\n=== 复合数据去重 ===")
    println("原始数据：")
    complexRDD.collect().foreach(x => println(s"${x._1}: ${x._2.mkString(",")}"))
    println("去重后：")
    complexRDD.distinct().collect().sortBy(_._1).foreach(x => println(s"${x._1}: ${x._2.mkString(",")}"))
  }

  /** 8. Conditional deduplication: filter first (keep positives only), then distinct. */
  private def conditionalDistinct(sc: SparkContext): Unit = {
    val mixedNumbersRDD = sc.parallelize(List(1, -2, 2, -3, 3, -3, 4, -4, 4, -5, 5, -5))
    println("\n=== 条件去重（只保留正数）===")
    println("原始数据：")
    mixedNumbersRDD.collect().foreach(x => print(s"$x "))
    println("\n条件去重后：")
    mixedNumbersRDD.filter(_ > 0).distinct().collect().sorted.foreach(x => print(s"$x "))
    println()
  }
}