package com.study.core

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{Aggregator, HashPartitioner, SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
 * @program: spark2.3.2
 * @author: Zhoujian
 * @date: 2022-06-09 19:53
 * @version: 1.0
 * @description:  仿照实现groupBy 和 groupByKey 方法
 * */
object CustomGroup {

	/**
	 * Entry point: builds (line, 1) pairs from the word file and runs both
	 * hand-rolled grouping implementations, writing each result to disk.
	 */
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[2]").setAppName("count")
		val sc = new SparkContext(conf)

		val lines: RDD[String] = sc.textFile("file:///E:\\workdocument\\faureDetect\\wordCount.txt",2)
		val pairs = lines.map(l => (l.trim, 1))

		customGroupByKey(pairs)
		customGroupBy(pairs)
		sc.stop()
	}

	/**
	 * Hand-rolled equivalent of `PairRDDFunctions.groupByKey(3)`:
	 * shuffles into 3 hash partitions and collects each key's values into an
	 * `ArrayBuffer` on the reduce side. Result is written to directory "out".
	 * (Summing afterwards would reproduce `.mapValues(_.sum)` word counting.)
	 *
	 * @param wordAndOne parent RDD of (word, 1) pairs
	 */
	private def customGroupByKey(wordAndOne: RDD[(String, Int)]) = {
		val shuffled = new ShuffledRDD[String, Int, ArrayBuffer[Int]](wordAndOne, new HashPartitioner(3))
		// groupByKey never combines on the map side — combining before the
		// shuffle would not shrink the data, so Spark disables it too.
		shuffled.setMapSideCombine(false)
		shuffled.setAggregator(Aggregator[String, Int, ArrayBuffer[Int]](
			(v: Int) => ArrayBuffer(v),                                      // first value for a key
			(buf: ArrayBuffer[Int], v: Int) => buf += v,                     // fold another value in
			(left: ArrayBuffer[Int], right: ArrayBuffer[Int]) => left ++= right // merge partition buffers
		))
		shuffled.saveAsTextFile("out")
	}

	/**
	 * Hand-rolled equivalent of `RDD.groupBy(_._1)`: pair every element with
	 * its key, then group by that key, yielding
	 * `RDD[(String, Iterable[(String, Int)])]`. Result is written to "out-1".
	 *
	 * @param wordAndOne parent RDD of (word, 1) pairs
	 */
	private def customGroupBy(wordAndOne: RDD[(String, Int)]) = {
		wordAndOne
			.map(pair => (pair._1, pair))
			.groupByKey()
			.saveAsTextFile("out-1")
	}


}
