package com.xf.day05

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.parallel.CollectionConverters.seqIsParallelizable
import scala.collection.parallel.ParSeq

object TestAggregate {

  /**
   * Demo: how `aggregate(zero)(seqOp, combOp)` behaves on an RDD, a sequential
   * List, and a parallel collection — and why the result depends on how the
   * data is partitioned, because the zero value is folded in once per chunk.
   */
  def main(args: Array[String]): Unit = {

    // Local Spark application with 16 worker threads, so the 4-element RDD
    // is spread over 16 partitions (most of them empty).
    val conf = new SparkConf()
      .setAppName("WordCount")
      .setMaster("local[16]")
      .set("spark.ui.port", "8080")
      .set("spark.driver.host", "127.0.0.1")

    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always stopped, even if the body throws;
    // previously it was never stopped and leaked the executor threads and UI port.
    try {
      val rdd16 = sc.parallelize(1 to 4)
      rdd16.foreach(x => println("我的值为:" + x))

      // Inspect how the 4 elements are distributed over the 16 partitions.
      println("=== 方法1: 使用 mapPartitionsWithIndex ===")
      val partitionData = rdd16.mapPartitionsWithIndex { (partitionIndex, iterator) =>
        val data = iterator.toList
        Iterator(s"分区$partitionIndex: 数据 = ${data.mkString("[", ", ", "]")}, 元素个数 = ${data.size}")
      }
      partitionData.collect().foreach(x => println("我遍历的值为: ===>" + x))

      // RDD.aggregate applies the zero value (3) once per partition inside
      // seqOp, then once more in the driver-side combOp reduction:
      //   1 partition  : 3*1*2*3*4 = 72, plus the driver's zero 3      -> 75
      //   16 partitions: the 4 non-empty partitions yield 3,6,9,12 (sum 30),
      //                  the 12 empty partitions each contribute 3 (36),
      //                  plus the driver's zero 3                      -> 69
      val comResult = rdd16.aggregate(3)(seqOp, combOp)
      println(comResult)

      // On a sequential collection aggregate is just foldLeft: the zero
      // value is used exactly once -> 10 + 1 + 2 + 3 + 4 + 5 = 25.
      val ls = List[Int](1, 2, 3, 4, 5)
      val res: Int = ls.aggregate(10)(_ + _, _ + _)
      println(res) // 25

      // On a parallel collection the zero value is applied once per chunk,
      // so the result depends on the split; with one element per chunk:
      // (10+1)+(10+2)+(10+3)+(10+4)+(10+5) = 65. In general this value is
      // not deterministic across runs/hardware.
      val par: ParSeq[Int] = ls.par
      val res2: Int = par.aggregate(10)(_ + _, _ + _)
      println(res2) // 65

    } finally {
      sc.stop()
    }
  }

  /** Within-partition fold: multiply the accumulator by the next element. */
  def seqOp(x1: Int, x2: Int): Int = x1 * x2

  /** Cross-partition combine: sum the partial partition results. */
  def combOp(x3: Int, x4: Int): Int = x3 + x4

}
