package cn.darksoul3.spark.operator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object GroupByKey {

  /**
   * Small local-mode demo that pairs each word with a count of 1,
   * the usual setup step before a key-based grouping/aggregation.
   * Several equivalent formulations of the grouping itself are kept
   * below as commented-out reference material.
   */
  def main(args: Array[String]): Unit = {
    // Local master using all available cores; app name shows up in the Spark UI.
    val conf = new SparkConf()
      .setAppName("group by key")
      .setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Sample vocabulary, spread across four partitions.
    val words: RDD[String] = sc.parallelize(
      List(
        "spark", "hadoop", "hive", "spark",
        "spark", "flink", "spark", "hbase",
        "kafka", "kafka", "kafka", "kafka",
        "hadoop", "flink", "hive", "flink",
      ),
      numSlices = 4
    )

    // Turn every word into a (word, 1) pair — the canonical word-count shape.
    val pairs = words.map(w => (w, 1))

    // ---- Reference variants (intentionally not executed) -------------------

    // (1) Direct groupByKey: collects all values per key into an Iterable.
    //val grouped: RDD[(String, Iterable[Int])] = pairs.groupByKey()
    //grouped.saveAsTextFile("group-out")

    // (2) groupBy on the first tuple element — same grouping, keeps full pairs.
    //pairs.groupBy(_._1)

    // (3) Hand-built shuffle: what groupByKey does under the hood, expressed
    //     as a ShuffledRDD plus an explicit Aggregator over ArrayBuffer.
    //    val shuffled: ShuffledRDD[String, Int, ArrayBuffer[Int]] =
    //      new ShuffledRDD[String, Int, ArrayBuffer[Int]](pairs, new HashPartitioner(pairs.getNumPartitions))
    //
    //    val createCombiner = (v: Int) => ArrayBuffer(v)
    //    val mergeValue = (buf: ArrayBuffer[Int], v: Int) => buf += v
    //    val mergeCombiners = (c1: ArrayBuffer[Int], c2: ArrayBuffer[Int]) => c1 ++= c2
    //    val aggregator = new Aggregator[String, Int, ArrayBuffer[Int]](createCombiner, mergeValue, mergeCombiners)
    //    shuffled.setAggregator(aggregator)
    //    shuffled.saveAsTextFile("shuffle-out")

    // (4) Group-then-sum: groupByKey followed by mapValues(_.sum).
    //    val grouped = pairs.groupByKey()
    //    val reduced = grouped.mapValues(_.sum)
    //    reduced.saveAsTextFile("reduce-out")

    // (5) combineByKey with explicit combiner functions — same sum, but
    //     combines map-side and avoids materializing full value groups.
    //    val f1 = (x: Int) => x
    //    val f2 = (m: Int, n: Int) => m + n
    //    val f3 = (g1: Int, g2: Int) => g1 + g2
    //
    //    val reduced = pairs.combineByKey(f1, f2, f3)
    //    reduced.saveAsTextFile("reduce-out")

    sc.stop()
  }
}
