package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
 * Demonstrates `aggregateByKey`: counts students per class from `data/students.txt`.
 *
 * `aggregateByKey(zeroValue)(seqOp, combOp)` takes two functions:
 *  - `seqOp`: map-side pre-aggregation (combine a value into the per-partition accumulator)
 *  - `combOp`: reduce-side aggregation (merge accumulators across partitions)
 *
 * `reduceByKey` uses the same function on both the map side and the reduce side;
 * use `aggregateByKey` when the two sides need different functions (or a different
 * accumulator type than the value type).
 */
object Demo2AggregateByKey {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("Demo2AggregateByKey")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    try {
      val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

      // Key each student by the class column (index 4), value 1 for counting.
      // NOTE(review): assumes every line has at least 5 comma-separated fields — a
      // malformed line would throw ArrayIndexOutOfBoundsException at runtime.
      val clazzKvDS: RDD[(String, Int)] = studentsRDD.map(stu => (stu.split(",")(4), 1))

      val countRDD: RDD[(String, Int)] = clazzKvDS.aggregateByKey(0)(
        (u: Int, i: Int) => u + i,     // seqOp: map-side pre-aggregation
        (u1: Int, u2: Int) => u1 + u2  // combOp: reduce-side merge of partial counts
      )

      countRDD.foreach(println)
    } finally {
      // Always release the SparkSession (and its underlying SparkContext);
      // the original leaked it on exit.
      spark.stop()
    }
  }

}
