package com.spark.core.transformation

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
 * combineByKey demo.
 *
 * combineByKey(createCombiner, mergeValue, mergeCombiners):
 *  - createCombiner: invoked once per key per partition, on the FIRST value seen
 *    for that key, to build the initial accumulator.
 *  - mergeValue: within a partition, folds each subsequent value of the same key
 *    into that partition's accumulator.
 *  - mergeCombiners: across partitions, merges the per-partition accumulators of
 *    the same key into the final result.
 */
object Demo27_combineByKey {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("combineByKey")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // 9 key/value pairs spread over 3 partitions (3 elements each, in list order).
    val rdd1: RDD[(String, Int)] = sc.makeRDD(List[(String, Int)](
      ("zhangsan", 10), ("zhangsan", 20), ("wangwu", 30),
      ("lisi", 40), ("zhangsan", 50), ("lisi", 60),
      ("wangwu", 70), ("wangwu", 80), ("lisi", 90)
    ), 3)

    // Print each element together with its partition index.
    // iter.map is lazy; the trailing count() forces evaluation so the
    // println side effect actually runs for every element.
    rdd1.mapPartitionsWithIndex((index: Int, iter: Iterator[(String, Int)]) => {
      iter.map { elem =>
        println("rdd1 partition index = " + index + ",value = " + elem)
        elem
      }
    }).count()
    println("++++++++++++++++++++++++++++++++++++++++++++")

    /**
     * Partition 0: ("zhangsan", 10), ("zhangsan", 20), ("wangwu", 30)
     * Partition 1: ("lisi", 40), ("zhangsan", 50), ("lisi", 60)
     * Partition 2: ("wangwu", 70), ("wangwu", 80), ("lisi", 90)
     *
     * createCombiner runs on the first value of each key within a partition:
     * Partition 0: ("zhangsan", 10hello), ("zhangsan", 20), ("wangwu", 30hello)
     * Partition 1: ("lisi", 40hello), ("zhangsan", 50hello), ("lisi", 60)
     * Partition 2: ("wangwu", 70hello), ("wangwu", 80), ("lisi", 90hello)
     *
     * After the in-partition merge (mergeValue, joins with "~"):
     * Partition 0: ("zhangsan", 10hello~20), ("wangwu", 30hello)
     * Partition 1: ("lisi", 40hello~60), ("zhangsan", 50hello)
     * Partition 2: ("wangwu", 70hello~80), ("lisi", 90hello)
     *
     * After the cross-partition merge (mergeCombiners, joins with "#"):
     * ("zhangsan", 10hello~20#50hello), ("lisi", 40hello~60#90hello), ("wangwu", 30hello#70hello~80)
     */
    val result: RDD[(String, String)] = rdd1.combineByKey((v: Int) => {
      // createCombiner: first value of a key in a partition -> initial accumulator.
      v + "hello"
    }, (acc1: String, v: Int) => {
      // mergeValue: fold later values of the same key within a partition.
      acc1 + "~" + v
    }, (acc1: String, acc2: String) => {
      // mergeCombiners: merge per-partition accumulators across partitions.
      // "#" separator makes the output match the walkthrough above
      // (plain concatenation would yield e.g. "10hello~2050hello").
      acc1 + "#" + acc2
    })

    // reduceByKey: sums the values of identical keys.
    // val result: RDD[(String, Int)] = rdd1.reduceByKey(_ + _)

    // Equivalent reduceByKey logic expressed with combineByKey:
    //    val result: RDD[(String, Int)] = rdd1.combineByKey((v: Int) => {
    //      v
    //    }, (v1: Int, v2: Int) => {
    //      v1 + v2
    //    }, (acc1: Int, acc2: Int) => {
    //      acc1 + acc2
    //    })

    result.foreach(println)

    // Release the SparkContext and its resources.
    sc.stop()
  }
}
