package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo15AggAvgAge {
  /**
    * Computes the average student age per class from `data/students.txt`,
    * demonstrating two RDD strategies: `groupByKey` (all records shuffled)
    * versus `aggregateByKey` (map-side pre-aggregation, less shuffle data).
    * Results are printed to stdout via `foreach(println)`.
    */
  def main(args: Array[String]): Unit = {
    // Single-threaded local master; app name now matches this object
    // (was the stale copy-paste "Demo14Agg").
    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("Demo15AggAvgAge")

    val sc = new SparkContext(conf)

    val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

    // Extract (class, age) pairs from each CSV line.
    // Assumes column 4 is the class and column 2 is the age —
    // TODO confirm against the actual students.txt schema.
    val clazzAndAge: RDD[(String, Double)] = studentsRDD.map(student => {
      val split: Array[String] = student.split(",")
      (split(4), split(2).toDouble)
    })

    /**
      * Approach 1: groupByKey.
      * Every (class, age) record is shuffled to the reducer before any
      * aggregation happens, then the average is computed per group.
      */
    val groupByRDD: RDD[(String, Iterable[Double])] = clazzAndAge.groupByKey()

    // groupByKey never emits empty groups, so ages.size is always > 0 here.
    val avgAgeRDD: RDD[(String, Double)] = groupByRDD.map {
      case (clazz: String, ages: Iterable[Double]) =>
        val avgAge: Double = ages.sum / ages.size
        (clazz, avgAge)
    }

    avgAgeRDD.foreach(println)

    /**
      * Approach 2: aggregateByKey.
      * In big-data jobs the shuffle is the most expensive phase, and
      * shuffle data is spilled to disk. aggregateByKey pre-aggregates on
      * the map side, shrinking the volume of shuffled data. It takes:
      *   1. a zero value — the initial accumulator (may be a tuple);
      *   2. seqOp  — the map-side combine function;
      *   3. combOp — the reduce-side merge function.
      * Here the accumulator is (sum of ages, record count).
      */
    val aggRDD: RDD[(String, (Double, Int))] = clazzAndAge.aggregateByKey((0.0, 0))(
      (u: (Double, Int), age: Double) => (u._1 + age, u._2 + 1), // map-side: fold one age into (sum, count)
      (u1: (Double, Int), u2: (Double, Int)) => (u1._1 + u2._1, u1._2 + u2._2) // reduce-side: merge partial (sum, count) pairs
    )

    // Average = total age / record count per class.
    val avgAgeRDD2: RDD[(String, Double)] = aggRDD.map {
      case (clazz: String, (sumAge: Double, num: Int)) =>
        (clazz, sumAge / num)
    }

    avgAgeRDD2.foreach(println)

    // Keep the driver process alive so the Spark web UI stays reachable.
    // Sleep inside the loop instead of busy-spinning — the original
    // `while (true) {}` pinned a CPU core at 100%.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
