package com.fwmagic.spark.core.transformations

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates `RDD.combineByKey`, which takes three functions:
  * 1. createCombiner: puts the first value seen for a key in a partition into a List
  * 2. mergeValue: adds each subsequent value for the same key (within a partition) to that List
  * 3. mergeCombiners: merges the per-partition Lists for each key across partitions
  */
object CombineByKeyDemo {
    def main(args: Array[String]): Unit = {
        val conf: SparkConf = new SparkConf()
                // getSimpleName on a Scala object includes a trailing '$'; strip it for a clean app name
                .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
                .setMaster("local[*]")

        val sc: SparkContext = new SparkContext(conf)

        // Sample pair RDD distributed over 2 partitions.
        val rdd: RDD[(String, Int)] = sc.parallelize(List(("cat", 2), ("cat", 5), ("mouse", 4),
            ("cat", 12), ("dog", 12), ("mouse", 2)), 2)

        /*
         * combineByKey takes three functions:
         *   1. createCombiner: wraps the first value seen for a key (per partition) in a List
         *   2. mergeValue: prepends each further value for the same key within a partition;
         *      :: is O(1) on List, unlike the O(n) append :+
         *   3. mergeCombiners: concatenates the per-partition Lists across partitions
         *
         * Element order inside each List is not deterministic across runs, but the
         * per-key sums computed below are, e.g.:
         *   (dog,List(12))
         *   (cat,List(2, 5, 12))
         *   (mouse,List(4, 2))
         */
        val combineByKeyRDD: RDD[(String, List[Int])] = rdd.combineByKey(
            (x: Int) => List(x),
            (a: List[Int], b: Int) => b :: a,
            (m: List[Int], n: List[Int]) => m ++ n)

        /*
         * Expected per-key sums:
         *   (dog,12)
         *   (cat,19)
         *   (mouse,6)
         */
        // mapValues keeps the key untouched and preserves the partitioner of the parent RDD
        val res: RDD[(String, Int)] = combineByKeyRDD.mapValues(_.sum)

        res.collect().foreach(println)

        sc.stop()
    }
}
