package com.scala

/*
The aggregate function frees us from the constraint that the return value
must have the same type as the elements of the RDD being operated on.
 */

import org.apache.spark.{SparkConf, SparkContext}

object Test4 {

  /**
   * Demonstrates three Spark RDD actions on a small in-memory dataset:
   * `reduce`, `fold`, and `aggregate` (whose result type may differ from
   * the RDD's element type).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("test4")
    val sc = new SparkContext(conf)
    try {
      val input = sc.parallelize(List(2, 3, 4))

      // reduce: combine elements of the RDD with a binary op (sum here).
      val oReduce = input.reduce((x, y) => x + y)

      // fold: like reduce but takes a zero value. 1 is the identity for
      // multiplication, which matters because Spark applies the zero value
      // once per partition (and once more when merging partitions).
      val oFold = input.fold(1)((x, y) => x * y)

      // aggregate: accumulate a (runningSum, count) pair per partition,
      // then merge the per-partition pairs — the result type (a tuple)
      // differs from the element type (Int).
      val result = input.aggregate((0, 0))(
        (acc, value) => (acc._1 + value, acc._2 + 1),
        (acc1, acc2) => (acc1._1 + acc2._1, acc1._2 + acc2._2)
      )
      // Guard against division by zero in case the RDD is ever empty.
      val avg = if (result._2 == 0) 0.0 else result._1 / result._2.toDouble

      println(avg)
      println(oReduce)
      println(oFold)
    } finally {
      // Always release the SparkContext so the JVM can exit cleanly.
      sc.stop()
    }
  }
}
