package cn.huq.day02

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{Aggregator, HashPartitioner, SparkConf, SparkContext}

/**
 * Word-count demo contrasting three ways to aggregate by key in Spark:
 * the high-level `reduceByKey`, the mid-level `combineByKeyWithClassTag`,
 * and a hand-wired `ShuffledRDD` with an explicit `Aggregator`.
 */
object ReduceByKeyDemo {

  def main(args: Array[String]): Unit = {
    // Local-mode context for the demo.
    val conf = new SparkConf().setAppName("reduceByKey").setMaster("local")
    val sc: SparkContext = new SparkContext(conf)

    // Tokenize each line on spaces and emit (word, 1) pairs.
    val wordPairs: RDD[(String, Int)] =
      sc.textFile("hdfs://hadoop102/input").flatMap(_.split(" ")).map((_, 1))

    // High-level API: sum the 1s per word.
    val countsViaReduce: RDD[(String, Int)] = wordPairs.reduceByKey(_ + _)

    // createCombiner: seeds the accumulator the first time a key appears in a partition.
    val createCombiner = (x: Int) => x
    // mergeValue: folds each further value for the same key within a partition.
    val mergeValue = (a: Int, b: Int) => a + b
    // mergeCombiners: merges per-partition accumulators across partitions.
    val mergeCombiners = mergeValue

    // Mid-level API: same aggregation, spelled out as the three combiner functions.
    // (The simpler overload would be: wordPairs.combineByKey(createCombiner, mergeValue, mergeCombiners))
    val countsViaCombine: RDD[(String, Int)] =
      wordPairs.combineByKeyWithClassTag(
        createCombiner,
        mergeValue,
        mergeCombiners,
        new HashPartitioner(wordPairs.partitions.length)
      )

    println(countsViaCombine.collect().toBuffer)
    println(countsViaReduce.collect().toBuffer)

    /*----------------------------------*/

    // Low-level API: build the shuffle by hand, with map-side combining enabled
    // and the same three functions packaged into an Aggregator.
    val handWired: ShuffledRDD[String, Int, Int] =
      new ShuffledRDD[String, Int, Int](wordPairs, new HashPartitioner(wordPairs.partitions.length))
    handWired.setMapSideCombine(true)
    handWired.setAggregator(new Aggregator[String, Int, Int](createCombiner, mergeValue, mergeCombiners))

    println(handWired.collect().toBuffer)
    sc.stop()
  }

}
