package com.xf.day05

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable

object TransformationTest {
  def main(args: Array[String]): Unit = {

    // Build the Spark configuration for a local run (all cores, fixed UI port/host).
    val conf = new SparkConf()
      .setAppName("WordCount")
      .setMaster("local[*]")
      .set("spark.ui.port", "8080")
      .set("spark.driver.host", "127.0.0.1")

    // testUnion(sc)
    // testIntersection(sc)
    // testSubtract(sc)
    // testCartesian(sc)
    // testMapValues(sc)
    // testGroupByKey(sc)
    // testReduceBykey(sc)

    // Create the SparkContext.
    val sc = new SparkContext(conf)
    val rdd11 = sc.parallelize(1 to 9, 3)

    // Inspect the actual partition count.
    println(s"actual partition count: ${rdd11.getNumPartitions}")

    println("=== data distribution per partition ===")
    rdd11.mapPartitionsWithIndex { (partitionIndex, iterator) =>
      val partitionData = iterator.toList
      println(s"partition $partitionIndex: ${partitionData.mkString("[", ", ", "]")}")
      partitionData.iterator
    }.count() // count() forces evaluation so the println side effects run

    // Key each value by (value % 3):
    // ArrayBuffer((1,1), (2,2), (0,3), (1,4), (2,5), (0,6), (1,7), (2,8), (0,9))
    val rdd12 = rdd11.map(item => (item % 3, item))

    // ArrayBuffer((1,1.0), (2,2.0), (0,3.0), (1,4.0), (2,5.0), (0,6.0), (1,7.0), (2,8.0), (0,9.0))
    val result1: RDD[(Int, Double)] = rdd12.mapValues(v => v.toDouble)
    println("values before aggregation: " + result1.collect().toBuffer)

    /**
     * The three combineByKey arguments:
     *    1. createCombiner: initialize the accumulator for a key first seen in a partition
     *    2. mergeValue: fold another value into a partition-local accumulator
     *    3. mergeCombiners: merge accumulators from different partitions
     *
     * Here the accumulator is (sum, count) per key.
     */
    val result2: RDD[(Int, (Double, Int))] = result1.combineByKey(
      (v: Double) => (v, 1),
      (c: (Double, Int), v: Double) => (c._1 + v, c._2 + 1),
      // BUG FIX: the original added c2._2 (a count) into the sum; sums must only add sums.
      (c1: (Double, Int), c2: (Double, Int)) => (c1._1 + c2._1, c1._2 + c2._2)
    )

    // ArrayBuffer((0,(18.0,3)), (1,(12.0,3)), (2,(15.0,3)))
    println(result2.collect().toBuffer)

    // Release the SparkContext and its resources.
    sc.stop()
  }

  /** Demonstrates reduceByKey: per-key sum over (item % 4, item) pairs. */
  private def testReduceBykey(sc: SparkContext): Unit = {
    val keyed = sc.parallelize(1 to 12, 4).map(n => (n % 4, n))
    // keyed: ArrayBuffer((1,1), (2,2), (3,3), (0,4), (1,5), (2,6), (3,7), (0,8), (1,9), (2,10), (3,11), (0,12))
    val summed = keyed.reduceByKey(_ + _)
    println(summed.collect().toBuffer) // ArrayBuffer((0,24), (1,15), (2,18), (3,21))
  }

  /** Demonstrates groupByKey: collect all values sharing a key into one sequence. */
  private def testGroupByKey(sc: SparkContext): Unit = {
    val source = sc.parallelize(1 to 9, 3) // ArrayBuffer(1, 2, 3, 4, 5, 6, 7, 8, 9)
    val grouped = source.map(n => (n % 3, n)).groupByKey()
    println(grouped.collect().toBuffer) // ArrayBuffer((0,Seq(3, 6, 9)), (1,Seq(1, 4, 7)), (2,Seq(2, 5, 8)))
  }

  /** Demonstrates mapValues: transform only the value of each pair, keys untouched. */
  private def testMapValues(sc: SparkContext): Unit = {
    val source = sc.parallelize(1 to 9, 3) // ArrayBuffer(1, 2, 3, 4, 5, 6, 7, 8, 9)
    val pairs = source.map(n => (n % 4, n))
    // pairs: ArrayBuffer((1,1), (2,2), (3,3), (0,4), (1,5), (2,6), (3,7), (0,8), (1,9))
    val shifted = pairs.mapValues(_ + 10)
    println(shifted.collect().toBuffer)
  }

  /** Demonstrates cartesian: every pairing of elements from both RDDs. */
  private def testCartesian(sc: SparkContext): Unit = {
    val odds = sc.parallelize(List(1, 3, 5))
    val evens = sc.parallelize(List(2, 4, 6))
    val product = odds.cartesian(evens).collect().toBuffer
    println(product) // ArrayBuffer((1,2), (1,4), (1,6), (3,2), (3,4), (3,6), (5,2), (5,4), (5,6))
  }

  /** Demonstrates subtract: elements of the left RDD not present in the right one. */
  private def testSubtract(sc: SparkContext): Unit = {
    val toRemove = sc.parallelize(List(1, 3, 4, 5))
    val full = sc.parallelize(1 to 5) // 1 2 3 4 5
    val remaining = full.subtract(toRemove).collect().toBuffer
    println(remaining) // ArrayBuffer(2)
  }

  /** Demonstrates intersection: distinct elements common to both RDDs. */
  private def testIntersection(sc: SparkContext): Unit = {
    val left = sc.parallelize(List(1, 3, 3, 4, 4, 5))
    val right = sc.parallelize(List(2, 3, 4, 3, 4, 6))
    val common = left.intersection(right).collect().toBuffer
    println(common) // ArrayBuffer(3, 4)
  }

  /** Demonstrates union: concatenation of two RDDs, duplicates preserved. */
  private def testUnion(sc: SparkContext): Unit = {
    // testFlatMap(sc)
    // testDistinct(sc)
    val first: RDD[Int] = sc.parallelize(List(1, 3, 4, 5))
    val second: RDD[Int] = sc.parallelize(List(2, 3, 4))
    val combined = first.union(second).collect().toBuffer
    println(combined) // ArrayBuffer(1, 3, 4, 5, 2, 3, 4)
  }

  /** Demonstrates distinct: removes duplicate elements from the RDD. */
  private def testDistinct(sc: SparkContext): Unit = {
    val withDupes = sc.parallelize(List(1, 2, 1, 5, 3, 5, 4, 8, 6, 4))
    val unique: mutable.Buffer[Int] = withDupes.distinct().collect().toBuffer
    println(unique)
  }

  /** Demonstrates a map -> filter -> flatMap pipeline over a small RDD. */
  private def testFlatMap(sc: SparkContext): Unit = {
    val base: RDD[Int] = sc.parallelize(List(1, 2, 3, 4))

    // Double every element.
    val doubled: RDD[Int] = base.map(n => n * 2)
    println(doubled.collect().toBuffer)        // ArrayBuffer(2, 4, 6, 8)
    println(doubled.collect().mkString(","))   // 2,4,6,8

    // Keep only values greater than 5.
    val large: RDD[Int] = doubled.filter(_ > 5)
    println(large.collect().toBuffer)          // ArrayBuffer(6, 8)

    // Expand each value into the inclusive range up to 9.
    val expanded: RDD[Int] = large.flatMap(n => n to 9)
    println(expanded.collect().toBuffer)       // ArrayBuffer(6, 7, 8, 9, 8, 9)
  }
}
