package cn.huq.day02

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{Aggregator, HashPartitioner, SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
 * Demonstrates how `groupByKey` works under the hood by wiring up a
 * [[org.apache.spark.rdd.ShuffledRDD]] with an explicit [[org.apache.spark.Aggregator]].
 *
 * The three aggregator functions mirror `combineByKey`'s contract:
 *   - createCombiner: first value seen for a key in a partition -> new ArrayBuffer
 *   - mergeValue:     subsequent values for that key -> append to the buffer
 *   - mergeCombiners: buffers for the same key from different partitions -> concatenate
 *
 * Map-side combining is disabled because grouping gains nothing from pre-aggregating
 * on the map side (it would only buffer full value lists in memory before the shuffle),
 * which matches what Spark's own `groupByKey` does.
 */
object GroupByKeyDemo {

  def main(args: Array[String]): Unit = {
    val sc: SparkContext = new SparkContext(config = new SparkConf().setAppName("GroupByKeyDemo").setMaster("local"))

    // Input path is taken from the command line when provided; otherwise fall back
    // to the original demo location so existing invocations keep working.
    val inputPath = if (args.nonEmpty) args(0) else "hdfs://hadoop102/input"
    val dataRDD = sc.textFile(inputPath)

    val wordAndOne: RDD[(String, Int)] = dataRDD.flatMap(_.split(" ")).map((_, 1))

    // createCombiner: the first value for a key in a partition seeds a new ArrayBuffer.
    val f1 = (v: Int) => ArrayBuffer[Int](v)
    // mergeValue: within a partition, append each further value for the same key.
    val f2 = (acc: ArrayBuffer[Int], in: Int) => acc += in
    // mergeCombiners: across partitions, concatenate the per-partition buffers.
    val f3 = (acc1: ArrayBuffer[Int], acc2: ArrayBuffer[Int]) => acc1 ++= acc2

    // Repartition by key hash, keeping the upstream partition count.
    val shuffled: ShuffledRDD[String, Int, ArrayBuffer[Int]] =
      new ShuffledRDD[String, Int, ArrayBuffer[Int]](wordAndOne, new HashPartitioner(wordAndOne.partitions.length))

    // Must be set BEFORE collect(): grouping semantics require no map-side combine.
    shuffled.setMapSideCombine(false)
    shuffled.setAggregator(new Aggregator[String, Int, ArrayBuffer[Int]](f1, f2, f3))

    val res: Array[(String, ArrayBuffer[Int])] = shuffled.collect()
    println(res.toBuffer)

    sc.stop()
  }

}
