package cn.darksoul3.spark.operator

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{Aggregator, HashPartitioner, SparkConf, SparkContext}

/**
 * Local-mode demo of how Spark's `reduceByKey` works under the hood:
 * instead of calling `word.reduceByKey(_ + _)`, we hand-build the
 * `ShuffledRDD` with a map-side-combining `Aggregator`, which is
 * exactly what `reduceByKey` does internally.
 *
 * Output is written as text files to the local directory
 * `reduce-by-key-out` (fails if it already exists — delete it between runs).
 */
object ReduceByKey {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // Fixed: app name previously said "group by key" (copy-paste leftover),
    // which contradicted this object's purpose in the Spark UI and logs.
    conf.setAppName("reduce by key").setMaster("local[*]")

    val sc = new SparkContext(conf)

    val words: RDD[String] = sc.parallelize(List(
      "spark", "hadoop", "hive", "spark",
      "spark", "flink", "spark", "hbase",
      "kafka", "kafka", "kafka", "kafka",
      "hadoop", "flink", "hive", "flink",
    ), numSlices = 4)

    // Pair each word with an initial count of 1, reduceByKey-style.
    val word = words.map((_, 1))

    // Redistribute the (word, 1) pairs by key hash, keeping the same
    // partition count as the upstream RDD.
    val shuffled = new ShuffledRDD[String, Int, Int](word, new HashPartitioner(word.getNumPartitions))

    // createCombiner: the first value seen for a key becomes the combiner.
    val createCombiner = (x: Int) => x
    // mergeValue: fold another value into the per-partition combiner (map side).
    val mergeValue = (m: Int, n: Int) => m + n
    // mergeCombiners: merge partial sums from different partitions (reduce side).
    val mergeCombiners = (g1: Int, g2: Int) => g1 + g2

    val aggregator = new Aggregator[String, Int, Int](createCombiner, mergeValue, mergeCombiners)
    // Combine within each map partition before shuffling — this pre-shuffle
    // aggregation is what distinguishes reduceByKey from groupByKey.
    shuffled.setMapSideCombine(true)
    shuffled.setAggregator(aggregator)

    shuffled.saveAsTextFile("reduce-by-key-out")

    sc.stop()

  }
}
