package com.ada.spark.rddoperator

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates `combineByKey`: for each key, combines the values into a custom
  * accumulator type. Here it builds (sum, count) pairs per key and derives the
  * per-key average.
  */
object Spark24_combineByKey {

    def main(args: Array[String]): Unit = {
        // Spark configuration: run locally with as many worker threads as cores.
        val conf = new SparkConf().setAppName("Spark24_combineByKey").setMaster("local[*]")
        // Create the Spark context (entry point for RDD operations).
        val sc = new SparkContext(conf)

        // Fix: the original never called sc.stop(), leaking the SparkContext
        // (threads, UI port, temp dirs). Ensure shutdown even if an action fails.
        try {
            val input = sc.parallelize(Array(("a", 88), ("b", 95), ("a", 91), ("b", 93), ("a", 95), ("b", 98)), 2)

            // Show how the pairs are distributed across the 2 partitions.
            input.glom().collect().foreach(datas => println(datas.mkString(",")))
            // partition 0: (a,88),(b,95),(a,91)
            // partition 1: (b,93),(a,95),(b,98)

            // For each key, accumulate (sum of values, number of occurrences).
            // combineByKey arguments:
            //   createCombiner: V => C          — first value seen for a key in a partition -> (v, 1)
            //   mergeValue:     (C, V) => C     — fold another value into the partition-local combiner
            //   mergeCombiners: (C, C) => C     — merge combiners from different partitions
            val combine = input.combineByKey((_, 1), (acc: (Int, Int), v) => (acc._1 + v, acc._2 + 1), (acc1: (Int, Int), acc2: (Int, Int)) => (acc1._1 + acc2._1, acc1._2 + acc2._2))

            println(combine.collect().mkString(","))
            // (b,(286,3)),(a,(274,3))

            // Derive the average: sum / count (as Double to avoid integer division).
            val result = combine.map { case (key, value) => (key, value._1 / value._2.toDouble) }
            println(result.collect().mkString(","))
            // (b,95.33333333333333),(a,91.33333333333333)
        } finally {
            // Release the SparkContext's resources.
            sc.stop()
        }
    }

}
