package com.atguigu1.core.acc

import org.apache.spark.rdd.RDD
import org.apache.spark.util.AccumulatorV2
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
 *
 * @description: Custom accumulator example (distributed WordCount via AccumulatorV2)
 * @time: 2021-03-12 11:45
 * @author: baojinlong
 **/
object Spark02DiyAcc {

  /**
   * Entry point: builds a local SparkContext, registers a custom word-count
   * accumulator, feeds it every element of a small RDD, and prints the result.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Name matches this object — the previous "persistDemo" was a copy-paste
    // leftover from a different (persistence) demo.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Spark02DiyAcc")
    val sparkContext = new SparkContext(conf)
    val rdd: RDD[String] = sparkContext.makeRDD(Seq("hello", "spark", "hello", "windy"))

    // Accumulator-based WordCount:
    // 1. create the accumulator instance on the driver
    val myWordCountAcc = new MyWordCountAcc
    // 2. register it with Spark so executors ship their partial results back
    sparkContext.register(myWordCountAcc, "myWordCountAcc")
    // 3. add every word; the accumulation itself happens on the executors
    rdd.foreach(myWordCountAcc.add)
    // 4. read the merged result — only valid on the driver after the action ran
    println(myWordCountAcc.value)
    // Side-effecting 0-arity call: keep the parentheses per Scala convention.
    sparkContext.stop()
  }

  /**
   * Custom accumulator: IN is a single word (String), OUT is a mutable map
   * from word to its occurrence count.
   */
  class MyWordCountAcc extends AccumulatorV2[String, mutable.Map[String, Long]] {
    // Backing state: word -> count. Mutable by necessity (AccumulatorV2 contract),
    // but never exposed for external mutation beyond `value`.
    var wcMap: mutable.Map[String, Long] = mutable.Map[String, Long]()

    /**
     * True while no word has been added — i.e. the accumulator is at its
     * zero value.
     */
    override def isZero: Boolean = wcMap.isEmpty

    /**
     * Creates a copy of this accumulator INCLUDING its current state.
     *
     * BUG FIX: the original returned a fresh empty accumulator, violating the
     * AccumulatorV2.copy() contract ("creates a new copy of this accumulator").
     * Spark relies on copy() when serializing accumulators to executors and in
     * copyAndReset(); an empty copy can silently drop partial counts.
     */
    override def copy(): AccumulatorV2[String, mutable.Map[String, Long]] = {
      val newAcc = new MyWordCountAcc
      newAcc.wcMap ++= wcMap
      newAcc
    }

    /**
     * Resets the accumulator back to its zero value (empty map).
     */
    override def reset(): Unit = {
      // Side-effecting 0-arity call: keep the parentheses per Scala convention.
      wcMap.clear()
    }

    /**
     * Folds one word into the accumulator (runs on the executors).
     *
     * @param v the word to count
     */
    override def add(v: String): Unit = {
      // Increment the word's count, starting from 0 for unseen words.
      wcMap.update(v, wcMap.getOrElse(v, 0L) + 1L)
    }

    /**
     * Merges another accumulator's partial result into this one
     * (runs on the driver).
     *
     * @param other a partial accumulator shipped back from an executor
     */
    override def merge(other: AccumulatorV2[String, mutable.Map[String, Long]]): Unit = {
      other.value.foreach {
        case (word, count) =>
          // Sum the remote count onto whatever this side already has.
          wcMap.update(word, wcMap.getOrElse(word, 0L) + count)
      }
    }

    /**
     * Current accumulated result; only meaningful on the driver after an
     * action has completed.
     */
    override def value: mutable.Map[String, Long] = wcMap
  }

}