package com.ada.spark.rddoperator

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates `foldByKey`: a simplified form of `aggregateByKey` where the
  * intra-partition function (seqOp) and the inter-partition function (combOp)
  * are the same, and the zero value is applied once per key per partition.
  */
object Spark23_foldByKey {

    def main(args: Array[String]): Unit = {
        // Spark configuration for a local run using all available cores.
        val conf = new SparkConf().setAppName("Spark23_foldByKey").setMaster("local[*]")
        // Create the Spark context.
        val sc = new SparkContext(conf)

        try {
            val rdd = sc.parallelize(List((1, 3), (1, 2), (1, 4), (2, 3), (3, 6), (3, 8)), 3)

            // Show how the pairs were distributed across the 3 partitions.
            rdd.glom().collect().foreach(datas => println(datas.mkString(",")))
            // partition 0: (1,3),(1,2)
            // partition 1: (1,4),(2,3)
            // partition 2: (3,6),(3,8)

            // Sum the values of each key: first within every partition, then
            // across partitions, starting from the zero value 0.
            val agg = rdd.foldByKey(0)(_ + _)
            // Equivalent aggregateByKey call with identical seqOp and combOp:
            // val agg = rdd.aggregateByKey(0)(_ + _, _ + _)

            println(agg.collect().mkString(","))
            // (3,14),(1,9),(2,3)
        } finally {
            // Always release the SparkContext so the application shuts down
            // cleanly even if the job above throws.
            sc.stop()
        }
    }

}
