package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo11AggregateByKey {
  /**
   * Demonstrates `aggregateByKey`: counting students per class and
   * computing the average age per class from `students.txt`
   * (CSV columns: id, name, age at index 2, ..., class at index 4).
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()

    conf.setAppName("Demo11AggregateByKey")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

    // Count the number of students in each class
    stuRDD
      .map(line => (line.split(",")(4), 1))

    /**
     * zeroValue: U — the initial value; its type is user-defined
     * seqOp: (U, V) => U — the map-side (pre-aggregation) operation,
     *        where V is the value type of the KV-format RDD
     * combOp: (U, U) => U — the reduce-side aggregation operation
     *
     * Difference from reduceByKey:
     * both perform map-side combining, but aggregateByKey lets you define
     * the pre-aggregation and final aggregation logic separately, whereas
     * reduceByKey uses the same function for both phases.
     */
    .aggregateByKey(0)((z, v) => z + v, (i1, i2) => i1 + i2)
//    .aggregateByKey(0)(_+_ ,_+_) // shorthand
      .foreach(println)

    // Compute the average age per class:
    // accumulator is (sum of ages, number of students)
    stuRDD
      .map(line => {
        // split once per record instead of twice
        val cols: Array[String] = line.split(",")
        (cols(4), cols(2).toDouble)
      })
      .aggregateByKey((0.0, 0))(seqOp = (u: (Double, Int), age: Double) => {
        (u._1 + age, u._2 + 1)
      }, combOp = (u1: (Double, Int), u2: (Double, Int)) => {
        // BUG FIX: the count must combine BOTH partial counts (u1._2 + u2._2);
        // the original used u2._2 + u2._2, discarding u1's count and
        // skewing the average whenever a key spans multiple partitions.
        (u1._1 + u2._1, u1._2 + u2._2)
      })
      .map(t2 => s"${t2._1},${t2._2._1 / t2._2._2}")
      .foreach(println)

    // Release the SparkContext and its resources
    sc.stop()
  }

}
