package com.offcn.bigdata.spark.p3

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Accumulator operations:
  *     counting occurrences of specific words with named long accumulators.
  */
object _04AccumulatorOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[*]")
            .setAppName(s"${_04AccumulatorOps.getClass.getSimpleName}")
        val sc = new SparkContext(conf)

        val lines = sc.textFile("file:/E:/work/2020-0828期大数据/workspace/spark-parent-0828/data/accumulator.txt")

        val words = lines.flatMap(line => line.split("\\s+"))
        // Named long accumulators; the names make them visible in the Spark Web UI.
        val atAcc = sc.longAccumulator("atAcc")
        val aAcc = sc.longAccumulator("aAcc")
        val levelAcc = sc.longAccumulator("levelAcc")
        // Count how many times "a", "at" and "level" appear.
        // NOTE(review): updating accumulators inside a transformation (map) can
        // over-count if tasks are retried or the stage is recomputed; for exact
        // counts Spark recommends updating accumulators inside actions only.
        val word2Count = words.map { word =>
            word match {
                case "a"     => aAcc.add(1)
                case "at"    => atAcc.add(1)
                case "level" => levelAcc.add(1)
                case _       => // word is not tracked
            }
            (word, 1)
        }.reduceByKey(_ + _)

        word2Count.foreach(println)
        println("---------------累加器的值-----------------------")
        // Report all three accumulators (the original printed only atAcc, and
        // printed the accumulator object rather than its value).
        println(s"aAcc: ${aAcc.value}")
        println(s"atAcc: ${atAcc.value}")
        println(s"levelAcc: ${levelAcc.value}")
        // Deliberate pause so the Spark Web UI (http://localhost:4040) can be
        // inspected before the context shuts down.
        Thread.sleep(200000)
        sc.stop()
    }
}

