package cn.darksoul3.spark.operator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates `aggregateByKey` on a pair RDD: counts word occurrences by
 * summing 1s per key. `aggregateByKey(zeroValue)(seqOp, combOp)` applies
 * `seqOp` within each partition and `combOp` to merge partition-level
 * partials, so only combined values cross the shuffle boundary.
 *
 * Output is written as text files under `aggregate-by-key-out/`.
 */
object AggregateByKey {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // App name now matches the operator being demonstrated
    // (was "group by key", a copy-paste inconsistency).
    conf.setAppName("aggregate by key").setMaster("local[*]")

    val sc = new SparkContext(conf)
    // Ensure the SparkContext is always released, even if the job throws.
    try {
      val words: RDD[String] = sc.parallelize(List(
        "spark", "hadoop", "hive", "spark",
        "spark", "flink", "spark", "hbase",
        "kafka", "kafka", "kafka", "kafka",
        "hadoop", "flink", "hive", "flink",
      ), numSlices = 4)

      // Pair every word with 1 so per-key sums yield occurrence counts.
      val word = words.map((_, 1))
      // zeroValue = 0; both the in-partition op and the cross-partition
      // combine op are plain integer addition.
      val aggregate = word.aggregateByKey(0)(_ + _, _ + _)
      // NOTE: saveAsTextFile fails (Hadoop semantics) if the output
      // directory already exists; delete it between runs.
      aggregate.saveAsTextFile("aggregate-by-key-out")
    } finally {
      sc.stop()
    }
  }
}
