package cn.jly.bigdata.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * @author lanyangji
 * @date 2019/11/25 21:12
 */
/**
 * Demonstrates the `aggregate` and `fold` actions on an RDD, and several
 * save-to-file actions. Runs locally on all cores.
 *
 * Key point illustrated: for `aggregate`/`fold` the zero value is applied
 * once per partition AND once more when merging partition results — this is
 * an important difference from `aggregateByKey`, where the zero value is
 * used only within partitions.
 */
object Spark02_expr20 {

  def main(args: Array[String]): Unit = {

    val sc: SparkContext = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("test"))

    try {
      // 10 elements spread over 2 partitions.
      val listRDD: RDD[Int] = sc.makeRDD(1 to 10, 2)

      // Zero value 0: result is simply the sum, 55.
      val res: Int = listRDD.aggregate(0)(_ + _, _ + _)
      println(res)

      // Zero value 10: added in each of the 2 partitions AND once when
      // combining partition results => 55 + 10*2 + 10 = 85.
      // (This is the key difference from aggregateByKey.)
      val res2: Int = listRDD.aggregate(10)(_ + _, _ + _)
      println(res2)

      // `fold` is the simplified form of `aggregate` where the intra-partition
      // and inter-partition functions are the same.
      println(listRDD.fold(0)(_ + _))
      println(listRDD.fold(10)(_ + _))

      // Save actions. NOTE: these throw if the output directory already
      // exists — delete out1/out2/out3 before re-running.
      listRDD.saveAsTextFile("out1")
      listRDD.saveAsObjectFile("out2")
      // saveAsSequenceFile requires a key-value RDD.
      sc.makeRDD(List(("a", 1))).saveAsSequenceFile("out3")
    } finally {
      // Always release the SparkContext (the original leaked it).
      sc.stop()
    }
  }
}
