package com.atguigu.sparkcore.rdd.kvs

import com.atguigu.sparkcore.util.MySparkContextUtil
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates `aggregateByKey`: aggregates values per key, first within each
  * partition and then across partitions.
  *
  * author 剧情再美终是戏
  * mail 13286520398@163.com
  * date 2020/1/7 6:29
  * version 1.0
  * <p>Parameter list of `aggregateByKey`:</p>
  * <p>1. zeroValue: the initial value given to every key in every partition</p>
  * <p>2. seqOp: function that folds each value into the accumulator within a partition</p>
  * <p>3. combOp: function that merges the per-partition accumulators</p>
  **/
object AggregateBykey {

  /**
    * Entry point: builds a small pair RDD and shows several `aggregateByKey`
    * use cases. Only example 3.1 is active; the others are kept commented for
    * reference. Prints the partition count and the aggregated result.
    */
  def main(args: Array[String]): Unit = {

    // Obtain the SparkContext through the shared project helper.
    val sc = MySparkContextUtil.get(args)

    // Sample (key, value) data distributed over 2 partitions.
    val samples = List(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8))
    val pairRdd = sc.makeRDD(samples, 2)

    // --- alternative examples, kept for reference ---

    // 1. Take the max value per key within each partition, then add the
    //    per-partition maxima together.
    //    val result = pairRdd.aggregateByKey(zeroValue = Int.MinValue)(_.max(_), _ + _)

    // 1.1 Same as 1, written with partial-function (case) syntax.
    //    val result = pairRdd.aggregateByKey(zeroValue = Int.MinValue)(
    //      { case (maxs, v) => maxs.max(v) },
    //      { case (sum, v)  => sum + v }
    //    )

    // 2. Compute the average value per key via a (sum, count) accumulator.
    //    val result = pairRdd
    //      .aggregateByKey(zeroValue = (0, 0))(
    //        (z, v) => (z._1 + v, z._2 + 1),
    //        (v1, v2) => (v1._1 + v2._1, v1._2 + v2._2))
    //      .mapValues(x => x._1.toDouble / x._2)

    // 2.1 Average per key, case syntax, finishing with map.
    //    val result = pairRdd.aggregateByKey(zeroValue = (0, 0))(
    //      { case ((sum, count), v) => (sum + v, count + 1) },
    //      { case ((s1, c1), (s2, c2)) => (s1 + s2, c1 + c2) }
    //    ).map { case (k, (sum, count)) => (k, sum.toDouble / count) }

    // 2.2 Average per key, case syntax, finishing with mapValues.
    //    val result = pairRdd.aggregateByKey(zeroValue = (0, 0))(
    //      { case ((sum, count), v) => (sum + v, count + 1) },
    //      { case ((s1, c1), (s2, c2)) => (s1 + s2, c1 + c2) }
    //    ).mapValues { case (sum, count) => sum.toDouble / count }

    // 3. Per key: (max, min) within each partition, then sum the maxima and
    //    the minima across partitions.
    //    val result = pairRdd.aggregateByKey(zeroValue = (Int.MinValue, Int.MaxValue))(
    //      (z, v) => (z._1.max(v), z._2.min(v)),
    //      (v1, v2) => (v1._1 + v2._1, v1._2 + v2._2))

    // 3.1 Active example: same as 3, written with explicit lambdas.
    //     seqOp folds each value into a (runningMax, runningMin) pair;
    //     combOp adds the per-partition pairs component-wise.
    val aggregated = pairRdd.aggregateByKey((Int.MinValue, Int.MaxValue))(
      (acc, v) => (acc._1.max(v), acc._2.min(v)),
      (left, right) => (left._1 + right._1, left._2 + right._2)
    )

    // Show how the data was partitioned, then the aggregated output.
    println(pairRdd.getNumPartitions)
    println(aggregated.collect().mkString(","))

    // Release Spark resources via the shared project helper.
    MySparkContextUtil.close(sc)
  }

}
