package com.offcn.spark.p4

import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: BigData-LGW
 * @ClassName: Accumulator
 * @Date: 2020/12/8 20:03
 * @功能描述: spark的累加器操作
 * @Version:1.0
 */
/**
 * Demonstrates basic RDD word counting and, as the object name promises,
 * a Spark accumulator: `ret.filter(...)` would launch an extra job just to
 * count one word, whereas a `LongAccumulator` collects the count as a side
 * effect of a single action.
 */
object Accumulator {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setAppName("Accumulator")
            .setMaster("local[*]")
        val sc = new SparkContext(conf)
        val listRDD = sc.parallelize(
            List(
                "a second spark a spark is shared second",
                "spark shared be shared in second spark"
            )
        )
        // Word count: split each line on whitespace, pair each word with 1,
        // and sum the 1s per word.
        val ret = listRDD.flatMap(_.split("\\s+")).map((_, 1)).reduceByKey(_ + _)
        // NOTE(review): println runs on executors; output is only visible on
        // the driver console because master is local[*].
        ret.foreach(println)
        println("---------------只需要统计spark出现了多少次------------------")
        // Original approach: a filter over the already-reduced RDD. Kept for
        // comparison, but this launches a second job just for one word.
        ret.filter {
            case (word, count) => word == "spark"
        }.foreach(println)
        // Accumulator approach (the point of this demo): count occurrences of
        // "spark" while a single action runs, no extra shuffle or job needed.
        val sparkCount = sc.longAccumulator("sparkCount")
        listRDD.flatMap(_.split("\\s+")).foreach { word =>
            if (word == "spark") sparkCount.add(1L)
        }
        // .value must be read on the driver, after the action has completed.
        println(s"(spark,${sparkCount.value})")
        sc.stop()
    }
}
