package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object Demo3ReduceByKey {
  def main(args: Array[String]): Unit = {
    // Build the unified Spark SQL entry point (SparkSession, the modern API).
    val session: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Obtain the underlying SparkContext for low-level RDD operations.
    val context: SparkContext = session.sparkContext

    // Load the raw student records, one line per student.
    val lines: RDD[String] = context.textFile("data/students.txt")

    // Key each record by its 5th comma-separated field, paired with a count of 1.
    val pairs: RDD[(String, Int)] = lines.map { line =>
      val fields: Array[String] = line.split(",")
      (fields(4), 1)
    }

    /**
     * reduceByKey: performs map-side pre-aggregation (combiner) before the shuffle.
     */
    val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)

    counts.foreach(println)

    /**
     * aggregateByKey: also pre-aggregates on the map side;
     * the map-side and reduce-side combine functions may differ.
     */
    val totals: RDD[(String, Int)] = pairs.aggregateByKey(0)(
      _ + _, // map-side (within-partition) combine function
      _ + _ // reduce-side (cross-partition) combine function
    )

    totals.foreach(println)

  }
}
