package cn.doitedu.day05

import cn.doitedu.day01.utils.SparkUtil
import org.apache.spark.rdd.RDD

/**
 * @Date 22.4.3
 * @Created by HANGGE
 * @Description
 */
object C19_转换算子_AggregateByKey {
  /**
   * Demonstrates `aggregateByKey` vs `reduceByKey`:
   *  1. word count, where local and global combine logic are identical;
   *  2. per-key average, where `aggregateByKey` carries a (sum, count) accumulator.
   */
  def main(args: Array[String]): Unit = {
    val sc = SparkUtil.getSc

    // Word count: split every line on whitespace and pair each word with 1.
    val wordAndOne = sc.textFile("data/a.txt").flatMap(_.split("\\s+")).map((_, 1))
    // Equivalent aggregateByKey form (seqOp and combOp are both `_ + _`):
    //   val res: RDD[(String, Int)] = wordAndOne.aggregateByKey(0)(_ + _, _ + _)
    // When the partition-local and global combine functions are identical,
    // reduceByKey is the idiomatic (and simpler) choice.
    val res: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
    // res.foreach(println)

    val rdd = sc.parallelize(List(
      ("zss", 88),
      ("zss", 98),
      ("lss", 88),
      ("lss", 68),
      ("lss", 77),
      ("ww", 88)
    ))

    // Per key, accumulate (sum of scores, number of scores):
    //   zero value: (0.0, 0)
    //   seqOp:  folds one score into a partition-local accumulator
    //   combOp: merges partial accumulators across partitions
    val res2: RDD[(String, (Double, Int))] = rdd.aggregateByKey((0.0, 0))(
      (acc, v) => (acc._1 + v, acc._2 + 1),
      (a, b) => (a._1 + b._1, a._2 + b._2)
    )

    // Average score per key. mapValues (instead of map) keeps the key untouched
    // and preserves res2's partitioner for any downstream key-based operations.
    val res3 = res2.mapValues { case (sum, cnt) => sum / cnt }

    res3.foreach(println)

    // Release cluster resources so the application shuts down cleanly
    // (the original never stopped the SparkContext).
    sc.stop()
  }

}
