package com.dxf.bigdata.D05_spark_again

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates `aggregateByKey`: a separate rule for combining values
 * within a partition (rule a) and for merging partial results across
 * partitions (rule b). By contrast, `reduceByKey` applies one rule (c)
 * both within and across partitions.
 */
object AggregateByKey {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("app")

    val sc = new SparkContext(sparkConf)

    try {
      val value: RDD[(String, Int)] = sc.makeRDD(List(("a", 1), ("a", 2), ("c", 3), ("a", 4)))

      // Example 1: max within each partition, then sum across partitions.
      value.aggregateByKey(0)(
        (a, b) => math.max(a, b), // intra-partition combine rule
        (a, b) => a + b // inter-partition merge rule
      ).collect().foreach(println)

      // Example 2: per-key average (a = 7/3 ≈ 2.33, c = 3).
      // Accumulator is (runningSum, count); partial pairs are merged
      // component-wise across partitions, then sum/count gives the mean.
      value.aggregateByKey((0, 0))(
        (u, v) => (u._1 + v, u._2 + 1),
        (t1, t2) => (t1._1 + t2._1, t1._2 + t2._2)
      ).map { case (k, (sum, count)) =>
        (k, f"${sum.toDouble / count}%.2f") // f-interpolator replaces deprecated .formatted
      }.collect().foreach(println)

      // Example 3: same average, shorter form via mapValues.
      val sumCount: RDD[(String, (Int, Int))] = value.aggregateByKey((0, 0))(
        (u, v) => (u._1 + v, u._2 + 1),
        (t1, t2) => (t1._1 + t2._1, t1._2 + t2._2)
      )
      // BUG FIX: the original divided Int by Int (num / count), truncating
      // 7/3 down to 2. Convert to Double so the true average is produced.
      val averages: RDD[(String, Double)] = sumCount.mapValues {
        case (sum, count) => sum.toDouble / count
      }
      averages.foreach(println)
    } finally {
      // Always release the SparkContext, even if a job above fails.
      sc.stop()
    }

  }

}
