package com.atguigu1.core.operator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 *
 * @description: combineByKey案例
 * @time: 2021-03-12 11:45
 * @author: baojinlong
 **/
/**
 * Demonstrates `RDD.combineByKey` by computing the (sum, count) of the
 * values for each key.
 *
 * combineByKey takes three functions:
 *   - createCombiner: V => C        — turns the first value seen for a key into an accumulator
 *   - mergeValue: (C, V) => C       — folds another value into the accumulator within a partition
 *   - mergeCombiners: (C, C) => C   — merges accumulators for the same key across partitions
 *
 * Here V = Int and C = (Int, Int), i.e. (runningSum, runningCount).
 */
object Spark24CombineByKey {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("rdd")
    val sc = new SparkContext(sparkConf)

    // Source key/value pairs, spread over 2 partitions.
    val pairs: RDD[(String, Int)] =
      sc.makeRDD(Seq(("a", 1), ("a", 2), ("b", 3), ("b", 4), ("b", 5), ("a", 6)), 2)

    // Accumulate (sum, count) per key: first within each partition, then across partitions.
    val sumAndCount: RDD[(String, (Int, Int))] = pairs.combineByKey(
      // First value for a key in a partition becomes the accumulator (value, 1).
      (first: Int) => (first, 1),
      // Fold subsequent in-partition values into the accumulator.
      (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
      // Merge the per-partition accumulators for the same key.
      (left: (Int, Int), right: (Int, Int)) => (left._1 + right._1, left._2 + right._2)
    )

    sumAndCount.collect.foreach(println)
    sc.stop()
  }
}
