package com.dxf.bigdata.D05_spark_again

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Similar to aggregateByKey.
 * The difference: the first value seen for each key is transformed by
 * createCombiner, and only then is the second function (mergeValue)
 * applied to the remaining values.
 */
object CombineByKey {

  /**
   * Demo entry point: computes (sum, count) per key with combineByKey,
   * then prints each resulting (key, (sum, count)) pair.
   */
  def main(args: Array[String]): Unit = {

    // Local-mode Spark setup for this demo.
    val conf = new SparkConf().setMaster("local[*]").setAppName("app")
    val context = new SparkContext(conf)

    // Two-partition pair RDD of sample (key, value) data.
    val pairs: RDD[(String, Int)] =
      context.makeRDD(List(("a", 1), ("a", 2), ("c", 3), ("a", 4)), 2)

    // combineByKey takes three functions (V = Int, C = (Int, Int)):
    //   createCombiner: V => C        — turns the first value per key into (sum, count)
    //   mergeValue:     (C, V) => C   — folds further values within a partition
    //   mergeCombiners: (C, C) => C   — merges per-partition accumulators across partitions
    val sumsAndCounts: RDD[(String, (Int, Int))] = pairs.combineByKey(
      (v: Int) => (v, 1),
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      (left: (Int, Int), right: (Int, Int)) => (left._1 + right._1, left._2 + right._2)
    )

    sumsAndCounts.collect().foreach(println)
  }

}
