package com.bigdata.spark.core.rdd.operator.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author : ranzlupup
 * @since : 2023/4/27 16:45
 */
/**
 * Demonstrates the four key-based aggregation transformations on a pair RDD:
 * `reduceByKey`, `foldByKey`, `aggregateByKey`, and `combineByKey`.
 *
 * All four produce the same result here (sum of values per key); they differ
 * in how the first value of each key is treated:
 *  - reduceByKey:  no initial value; the first value is used as-is.
 *  - foldByKey:    a zero value (same type as the values) seeds each key.
 *  - aggregateByKey: a zero value plus separate intra- and inter-partition
 *                  combine functions (may change the value type).
 *  - combineByKey: the first value is transformed by `createCombiner`
 *                  (most general form; may change the value type).
 *
 * NOTE: despite the name "WordCount", this demo aggregates pre-built
 * (key, count) pairs rather than counting words in text.
 */
object RDD_Transform_WordCount {
    def main(args: Array[String]): Unit = {
        val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("RDD")
        val sparkContext: SparkContext = new SparkContext(sparkConf)
        try {
            // Two partitions so the inter-partition combiner paths are exercised.
            val rdd: RDD[(String, Int)] = sparkContext.makeRDD(
                List(
                    ("a", 1), ("a", 2), ("b", 3),
                    ("b", 4), ("b", 5), ("a", 6)
                ), 2
            )
            // reduceByKey: merge values per key with a single binary function.
            println("=====================")
            rdd.reduceByKey(_ + _).collect().foreach(println)
            println("=====================")
            // foldByKey: like reduceByKey but seeded with a zero value (0 here).
            rdd.foldByKey(0)(_ + _).collect().foreach(println)
            println("=====================")
            // aggregateByKey: zero value, then (seqOp, combOp) — identical here,
            // so the result matches reduceByKey.
            rdd.aggregateByKey(0)(
                (x, y) => x + y,
                (x, y) => x + y
            ).collect().foreach(println)
            println("=====================")
            // combineByKey: (createCombiner, mergeValue, mergeCombiners).
            // The identity createCombiner keeps the value type as Int.
            rdd.combineByKey(t => t, (t1: Int, t2) => t1 + t2, (t1: Int, t2: Int) => t1 + t2
            ).collect().foreach(println)
            println("=====================")

            // Same combineByKey call repeated in condensed form — prints the
            // identical result a second time (kept for parity with the demo).
            rdd.combineByKey(v => v, (v1: Int, v2) => v1 + v2, (v1: Int, v2: Int) => v1 + v2)
                .collect().foreach(println)
        } finally {
            // Always release the SparkContext; the original leaked it on exit.
            sparkContext.stop()
        }
    }
}
