package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demonstrates `aggregateByKey` and contrasts it with `reduceByKey`:
 *  1. counts students per gender two ways (reduceByKey, then aggregateByKey);
 *  2. computes the average age per class, which a single `reduceByKey`
 *     cannot express because it needs both a sum and a count.
 *
 * Reads `Spark/data/students.txt` (CSV: id, name, age, gender, class)
 * and prints results to stdout. Runs with a local master.
 */
object Demo15AggregateByKey {
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
    // Bug fix: the app name was "Demo14MapValues", a copy-paste leftover
    // from the previous demo, which mislabels this job in the Spark UI.
    conf.setAppName("Demo15AggregateByKey")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    // Build (gender, 1) pairs; column index 3 holds the gender field.
    val genderRDD: RDD[(String, Int)] = sc
      .textFile("Spark/data/students.txt")
      .map(line => {
        val splits: Array[String] = line.split(",")
        val gender: String = splits(3)
        (gender, 1)
      })

    // Count students per gender.
    genderRDD
      .reduceByKey(_ + _) // limitation: reduceByKey cannot compute an average directly
      .foreach(println)

    /**
     * `aggregateByKey` transformation:
     *  - zeroValue: the initial accumulator for each key
     *  - seqOp:  map-side aggregation (within a partition)
     *  - combOp: reduce-side aggregation (across partitions)
     * It can carry several aggregates in one pass, which is what
     * `reduceByKey` lacks for computing averages.
     */
    genderRDD
      .aggregateByKey(0)(
        (u1: Int, u2: Int) => {
          u1 + u2 // within-partition aggregation
        }, (mapU1: Int, mapU2: Int) => {
          mapU1 + mapU2 // cross-partition aggregation
        }
      ).foreach(println)

    // Average age per class — not expressible with a single reduceByKey.
    // Columns: index 4 is the class, index 2 is the age.
    val clazzAgeRDD: RDD[(String, Int)] = sc
      .textFile("Spark/data/students.txt")
      .map(line => {
        val splits: Array[String] = line.split(",")
        val clazz: String = splits(4)
        val age: Int = splits(2).toInt
        (clazz, age)
      })

    clazzAgeRDD
      .aggregateByKey((0, 0))( // accumulator: (running age sum, student count)
        (u1: (Int, Int), age: Int) => {
          val mapAgeSum: Int = u1._1 + age
          val mapAgeCnt: Int = u1._2 + 1
          (mapAgeSum, mapAgeCnt)
        }, (mapU1: (Int, Int), mapU2: (Int, Int)) => {
          val ageSum: Int = mapU1._1 + mapU2._1
          val ageCnt: Int = mapU1._2 + mapU2._2
          (ageSum, ageCnt)
        }
      )
      .map {
        // ageCnt >= 1 for every emitted key (a key only appears if at least
        // one record contributed), so the division is safe.
        case (clazz: String, (ageSum: Int, ageCnt: Int)) =>
          s"$clazz,${ageSum / ageCnt.toDouble}"
      }.foreach(println)

    // Bug fix: release the SparkContext so the local driver shuts down cleanly
    // (the original never stopped it).
    sc.stop()
  }

}
