package com.bigdata.spark.core.rdd.operator.action

import org.apache.spark.rdd.RDD
import org.apache.spark.util.AccumulatorV2
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable


/**
 * @author : ranzlupup
 * @date : 2023/3/2 12:17
 */
object RDD_Acc {
    def main(args: Array[String]): Unit = {
        // Local-mode Spark context for the accumulator demo.
        val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Acc")
        val sc: SparkContext = new SparkContext(conf)

        val words: RDD[String] = sc.makeRDD(List("Hello", "Spark", "Scala"))

        // Custom accumulator: must be registered with the context before use
        // so Spark can merge the per-executor copies back into the driver.
        val wordCountAcc = new MyAccumulator()
        sc.register(wordCountAcc, "wordCountAcc")

        // Side-effecting action: each word is folded into the accumulator.
        words.foreach(wordCountAcc.add)

        // Reading .value on the driver yields the merged word counts.
        println(wordCountAcc.value)

        sc.stop()
    }
}

/**
 * A word-count accumulator: IN = a single word (String),
 * OUT = a mutable map of word -> occurrence count.
 *
 * Note: `value` returns the internal mutable map directly; callers
 * should treat it as read-only.
 */
class MyAccumulator extends AccumulatorV2[String, mutable.Map[String, Int]] {
    var wordMap: mutable.Map[String, Int] = mutable.Map()

    // True when nothing has been accumulated yet.
    override def isZero: Boolean = {
        wordMap.isEmpty
    }

    // Returns a copy of this accumulator INCLUDING its current state.
    // (The AccumulatorV2 contract requires copy() to preserve the value;
    // returning an empty accumulator here would silently drop counts
    // whenever Spark copies the accumulator, e.g. via copyAndReset().)
    override def copy(): AccumulatorV2[String, mutable.Map[String, Int]] = {
        val acc = new MyAccumulator()
        acc.wordMap = mutable.Map(wordMap.toSeq: _*)
        acc
    }

    // Resets the accumulator to its zero state (empty map).
    override def reset(): Unit = {
        wordMap.clear()
    }

    // Accumulates one word: word -> (word, count + 1).
    override def add(word: String): Unit = {
        val newCount: Int = wordMap.getOrElse(word, 0) + 1
        wordMap.update(word, newCount)
    }

    // Merges another accumulator's counts into this one (driver-side merge
    // of per-executor partial results). Mutates only this.wordMap.
    override def merge(other: AccumulatorV2[String, mutable.Map[String, Int]]): Unit = {
        val map1 = this.wordMap
        val map2 = other.value

        map2.foreach {
            case (word, count) => {
                val newCount: Int = map1.getOrElse(word, 0) + count
                map1.update(word, newCount)
            }
        }
    }

    // Current accumulated word counts (OUT).
    override def value: mutable.Map[String, Int] = {
        wordMap
    }
}
