package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demonstrates `reduceByKey`: grouping by key with value aggregation, including
 * map-side pre-aggregation (combiner), and why a naive pairwise "average" is wrong.
 */
object Demo10ReduceByKey {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()

    conf.setAppName("Demo10ReduceByKey")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    // students.txt: comma-separated lines; column 2 is age, column 4 is class.
    val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")
    // Three independent actions follow; cache so the file is read only once.
    stuRDD.cache()

    // Count students per class.
    stuRDD
      .map(line => (line.split(",")(4), 1))
      // reduceByKey groups by key AND aggregates values,
      // pre-aggregating on the map side (before the shuffle).
      //      .reduceByKey((i1, i2) => i1 + i2)
      // shorthand:
      .reduceByKey(_ + _)
      .foreach(println)

    // Average age per class — the WRONG way (kept on purpose to show the pitfall):
    // reduceByKey applies the function pairwise and pre-aggregates partial results,
    // so (i1 + i2) / 2 does NOT compute a true mean over all values.
    stuRDD
      .map(line => (line.split(",")(4), line.split(",")(2).toInt))
      .reduceByKey((i1, i2) => {
        (i1 + i2) / 2 // incorrect: averaging is not associative, cannot be pre-aggregated directly
      })
      .foreach(println)

    // Correct way: reduce (ageSum, count) pairs — both sums are associative and
    // safe to pre-aggregate — then divide once at the end.
    stuRDD
      .map { line =>
        val cols = line.split(",") // split once per record instead of twice
        (cols(4), (cols(2).toDouble, 1))
      }
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
      .map { case (clazz, (ageSum, cnt)) => s"$clazz,${ageSum / cnt}" }
      .foreach(println)

    // Release the SparkContext and its resources.
    sc.stop()
  }

}
