package cn.wangjie.spark.operations.agg

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext, TaskContext}

/**
 * Demonstrates RDD aggregation functions: reduce, fold and aggregate.
 */
object SparkAggTest {

  def main(args: Array[String]): Unit = {
    // 1. Build the SparkContext (application entry point for the Spark runtime).
    val sc: SparkContext = {
      // 1.a Create a SparkConf and set application properties.
      //     stripSuffix("$") removes the trailing '$' from the Scala object's class name.
      val sparkConf = new SparkConf()
        .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
        .setMaster("local[2]")
      // 1.b Reuse an existing context if one is already running, otherwise create one.
      SparkContext.getOrCreate(sparkConf)
    }

    // 2. Create an RDD from a local collection, spread over two partitions
    //    so the per-partition vs. cross-partition aggregation steps are visible.
    val list: List[Int] = (1 to 10).toList

    val datasRDD: RDD[Int] = sc.parallelize(list, numSlices = 2)
    // Show which elements landed in which partition.
    datasRDD.foreachPartition { iter =>
      println(s"p-${TaskContext.getPartitionId()}: ${iter.mkString(", ")}")
    }

    println("========================== reduce ==============================")

    // reduce: combines elements within each partition, then across partitions,
    // using the same binary function for both phases (no zero value).
    val result: Int = datasRDD
      .reduce((tmp, item) => {
        println(s"p-${TaskContext.getPartitionId()}, tmp = $tmp, item = $item")
        tmp + item
      })
    println(s"result = $result")

    println("========================== fold ==============================")

    // fold: like reduce, but seeds BOTH the per-partition and the cross-partition
    // phases with the zero value (0 here, so the sum is unchanged).
    val result2: Int = datasRDD
      .fold(0)((tmp, item) => {
        println(s"p-${TaskContext.getPartitionId()}, tmp = $tmp, item = $item")
        tmp + item
      })
    println(s"result = $result2")


    println("========================== aggregate ==============================")
    /*
      def aggregate[U: ClassTag]
      // initial value for the intermediate accumulator
      (zeroValue: U)
      (
        // intra-partition aggregation: folds the elements of each partition
        seqOp: (U, T) => U,
        // inter-partition aggregation: merges the per-partition results
        combOp: (U, U) => U
      ): U
     */
    // FIX: capture and print the aggregate result — previously it was computed
    // and silently discarded, unlike the reduce/fold sections above.
    val result3: Int = datasRDD.aggregate(0)(
      // seqOp: (U, T) => U
      (tmp, item) => {
        println(s"p-${TaskContext.getPartitionId()}, tmp = $tmp, item = $item")
        tmp + item
      },
      // combOp: (U, U) => U
      (tmp, item) => {
        println(s"p-${TaskContext.getPartitionId()}, tmp = $tmp, item = $item")
        tmp + item
      }
    )
    println(s"result = $result3")


    // 3. Application finished — release resources.
    sc.stop()
  }

}
