package com.study.core

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{Aggregator, HashPartitioner, SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
 * @program: spark2.3.2
 * @author: Zhoujian
 * @date: 2022-06-09 19:53
 * @version: 1.0
 * @description: Hand-rolled implementations of reduceByKey (word count),
 *               first via ShuffledRDD + Aggregator, then via combineByKey.
 * */
object CustomReduceByKey {

	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[2]").setAppName("count")
		val sc = new SparkContext(conf)

		// Read the input file with 2 partitions and map each line to (word, 1).
		val sourceRDD: RDD[String] = sc.textFile("file:///E:\\workdocument\\faureDetect\\wordCount.txt",2)
		val wordAndOne = sourceRDD.map(line => (line.trim, 1))

		customReduceByKey1(wordAndOne)
		customReduceByKey2(wordAndOne)
		sc.stop()
	}

	/**
	 * reduceByKey implemented directly on a ShuffledRDD.
	 *
	 * Equivalent to `wordAndOne.reduceByKey(_ + _)` (a PairRDDFunctions
	 * method), but built by hand: a ShuffledRDD with a HashPartitioner,
	 * map-side combining enabled, and an explicit Aggregator.
	 *
	 * @param wordAndOne parent RDD of (word, 1) pairs
	 */
	private def customReduceByKey1(wordAndOne: RDD[(String, Int)]): Unit = {
		val shuffledRdd = new ShuffledRDD[String, Int, Int](wordAndOne, new HashPartitioner(3))
		// Map-side combining requires an Aggregator; it is checked lazily at compute time.
		shuffledRdd.setMapSideCombine(true)
		// For sum-reduction the combiner is the value itself, and merging values
		// and merging combiners are the same addition.
		val createCombiner = (v: Int) => v
		val mergeCounts = (old: Int, v: Int) => old + v
		shuffledRdd.setAggregator(Aggregator[String, Int, Int](createCombiner, mergeCounts, mergeCounts))
		shuffledRdd.saveAsTextFile("out1")
	}

	/**
	 * reduceByKey implemented via PairRDDFunctions.combineByKey.
	 *
	 * @param wordAndOne parent RDD of (word, 1) pairs
	 */
	private def customReduceByKey2(wordAndOne: RDD[(String, Int)]): Unit = {
		val createCombiner = (v: Int) => v
		// Merging a value into a combiner and merging two combiners are both addition.
		val mergeCounts = (old: Int, v: Int) => old + v
		val reduced = wordAndOne.combineByKey[Int](createCombiner, mergeCounts, mergeCounts, new HashPartitioner(3))
		reduced.saveAsTextFile("out2")
	}


}
