package com.sjc.action

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDD_28_treeAggregate {

    /**
     * Demo of `RDD.treeAggregate`: aggregates within each partition with one
     * function (seqOp), then combines partition results in a multi-level tree
     * with another (combOp).
     *
     * Here: seqOp = `math.max` (max per partition), combOp = `_ + _`
     * (sum of the per-partition maxima). With partitions [1,2], [3], [4,5]
     * and zero value 0, the result is 2 + 3 + 5 = 10.
     */
    def main(args: Array[String]): Unit = {

        // Initialize the Spark entry point.
        val sparkConf = new SparkConf().setMaster("local").setAppName("RDD_Test")
        val sc = new SparkContext(sparkConf)

        try {
            val data = List(1, 2, 3, 4, 5)
            // 3 partitions so treeAggregate has multiple partial results to combine.
            val dataRDD: RDD[Int] = sc.parallelize(data, 3)

            // Tag each element with its partition index so the partition layout
            // is visible in the output.
            def myfunc(index: Int, iter: Iterator[Int]): Iterator[String] = {
                iter.map(x => s"[partID:$index, val: $x]")
            }

            val resultValues: Array[String] = dataRDD.mapPartitionsWithIndex(myfunc).collect
            resultValues.foreach(println)

            // First take the max of each partition (seqOp), then sum the
            // per-partition maxima (combOp).
            val resultValue: Int = dataRDD.treeAggregate(0)(math.max(_, _), _ + _)

            println(resultValue)
        } finally {
            // Always release the SparkContext, even if the job above fails.
            sc.stop()
        }
    }
}
