package com.offcn.spark.p4

import com.offch.bigdata.common.CommonUtil
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: BigData-LGW
 * @ClassName: Accumulator2
 * @Date: 2020/12/8 20:20
 * @功能描述: 需求是统计每一个单词出现次数,在此基础之上只计算spark出现了多少次
 * @Version:1.0
 */
object Accumulator2 {
    def main(args: Array[String]): Unit = {
        // Local-mode Spark context for the demo.
        val sparkConf = new SparkConf()
            .setAppName("Accumulator2")
            .setMaster("local[*]")
        val context = new SparkContext(sparkConf)

        val lines = context.parallelize(List(
            "a second spark a spark is shared second",
            "spark shared be shared in second spark"
        ))

        // Driver-side long accumulator counting occurrences of "spark".
        val sparkCounter = context.longAccumulator("sparkAccu")

        // Tokenize on whitespace and emit (word, 1) pairs; the accumulator
        // is bumped inside a transformation, so it only advances when an
        // action actually forces this RDD to be computed.
        val wordPairs = lines.flatMap(_.split("\\s+")).map { token =>
            if (token == "spark") {
                sparkCounter.add(1)
            }
            (token, 1)
        }

        val wordCounts = wordPairs.reduceByKey(_ + _)

        // Before any action runs, the transformation is still lazy,
        // so the accumulator has not moved yet.
        println("action前，累加结果：" + sparkCounter.value)
        wordCounts.foreach(println)
        println("action后，累加结果：" + sparkCounter.value)

        sparkCounter.reset() // clear the accumulator back to zero
        println("-------------重复调用--------------------")
        // wordPairs is not cached, so this second action recomputes the
        // map stage and the accumulator counts "spark" all over again.
        wordPairs.count()
        println("重复调用累加器，累加结果：" + sparkCounter.value)

        // Keep the JVM alive so the Spark web UI can be inspected.
        CommonUtil.sleep(1000000)
        context.stop()
    }
}
