package com.weic.spark.scala.p3.shared

import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author BigData-weic
 * ClassName: _02AccumulatorOps
 * Date: 2020/12/8 23:29
 * Description: demonstrates Spark accumulator usage — counting occurrences
 *              of the word "spark" with a LongAccumulator versus a plain
 *              reduceByKey word count, and showing reset semantics.
 * @version 1.0
 */
object _02AccumulatorOps {

	/**
	 * Demonstrates Spark accumulators against a plain word count.
	 *
	 * Shows three things: (1) counting "spark" by filtering a reduceByKey
	 * result, (2) counting it with a LongAccumulator bumped inside a map
	 * transformation, and (3) that the accumulator only advances once an
	 * action executes — and advances again whenever the lineage is
	 * recomputed, which is why it must be reset before a second action.
	 */
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf()
			.setAppName("_02AccumulatorOps")
			.setMaster("local[*]")
		val context = new SparkContext(sparkConf)

		val lines = context.parallelize(
			List(
				"a second spark a spark is shared second",
				"spark shared be shared in second spark"
			)
		)

		// Plain word count: occurrences of every word.
		val wordCounts = lines
			.flatMap(_.split("\\s+"))
			.map(word => (word, 1))
			.reduceByKey(_ + _)
		wordCounts.foreach(println)

		// Occurrences of "spark" only, obtained by filtering the counts.
		wordCounts.filter(_._1 == "spark").foreach(println)

		// Same question answered with an accumulator instead of a filter.
		val sparkCounter = context.longAccumulator("sparkAccu")

		// Count every word, bumping the accumulator whenever "spark" passes by.
		val taggedWords = lines.flatMap(_.split("\\s+")).map { word =>
			if (word == "spark") {
				sparkCounter.add(1)
			}
			(word, 1)
		}
		val accumulatedCounts = taggedWords.reduceByKey(_ + _)

		// Transformations are lazy: no action has run yet, so the value is still 0.
		println("-----action--前--》" + sparkCounter.value)
		accumulatedCounts.foreach(println)
		// After the action, the accumulator holds the real count.
		println("-----action--后--》" + sparkCounter.value)

		/**
		 * Reusing an accumulator across actions requires a reset: the second
		 * action re-runs the lineage, so the adds would happen a second time.
		 */
		// Reset before the second action so the count is not doubled.
		sparkCounter.reset()
		taggedWords.count
		println("-----action--后2------》" + sparkCounter.value)

		context.stop()
	}

}
