package cn.wangjie.spark.operations.agg

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demonstrates the aggregation operators on PairRDDFunctions:
 *   - groupByKey
 *   - reduceByKey / foldByKey
 *   - aggregateByKey
 *   - combineByKey (rarely used directly)
 * Each operator is used to implement the same word-count over a local text file.
 */
object SparkAggByKeyTest {
	def main(args: Array[String]): Unit = {
		
		// Build (or reuse) the SparkContext for this application.
		val sc: SparkContext = {
			// 1.a SparkConf: app name derived from the object name, local master with 2 threads.
			val sparkConf = new SparkConf()
				.setAppName(this.getClass.getSimpleName.stripSuffix("$"))
				.setMaster("local[2]")
			// 1.b Returns the active context if one exists, otherwise creates a new one.
			SparkContext.getOrCreate(sparkConf)
		}
		
		// Read raw text lines from the local file system.
		val inputRDD: RDD[String] = sc.textFile("datas/wordcount/wordcount.data")
		
		val etlRDD: RDD[(String, Int)] = inputRDD
			// Drop null and blank lines.
			.filter(line => null != line && line.trim.nonEmpty)
			// Split each line into words on runs of whitespace.
			.flatMap(line => line.trim.split("\\s+"))
			// Pair every word with an initial count of 1.
			.mapPartitions(iter => iter.map(word => (word, 1)))
		
		println("===================== groupByKey =====================")
		etlRDD
			// NOTE: groupByKey shuffles every (word, 1) pair; the map-side-combining
			// operators below (reduceByKey / aggregateByKey) are preferred for sums.
			.groupByKey()
			// Sum the grouped counts per word.
			.map { case (word, counts) => word -> counts.sum }
			.foreach(println)
		
		println("===================== reduceByKey =====================")
		etlRDD
			// Combines values per key both map-side and reduce-side.
			.reduceByKey(_ + _)
			.foreach(println)
		
		println("===================== aggregateByKey =====================")
		etlRDD
			/*
				def aggregateByKey[U: ClassTag]
				(zeroValue: U)
				(
					seqOp: (U, V) => U,
	                combOp: (U, U) => U
	            ): RDD[(K, U)]
			 */
			// zeroValue = 0; seqOp folds values within a partition,
			// combOp merges partial sums across partitions.
			.aggregateByKey(0)(_ + _, _ + _)
			.foreach(println)
		
		// Keeps the driver alive so the Spark web UI (http://localhost:4040) can be
		// inspected. NOTE(review): ~28 hours — sc.stop() below is effectively
		// unreachable in normal runs; interrupt the JVM to exit.
		Thread.sleep(100000000)
		
		// Release all Spark resources.
		sc.stop()
	}
	
}
