package org.huangrui.spark.java.core.acc;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.AccumulatorV2;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * @Author hr
 * @Create 2024-10-19 23:42
 */
public class Spark04_Acc_WordCount {
    public static void main(String[] args) {
        // Local Spark context using all available cores.
        final SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("spark");
        final JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);

        final JavaRDD<String> words =
                sparkContext.parallelize(Arrays.asList("hello", "spark", "hello"), 2);

        // Accumulator-based WordCount:
        // 1. create the custom accumulator
        final MyAccumulator wordCountAcc = new MyAccumulator();
        // 2. register it with Spark so executor-side updates are merged back to the driver
        sparkContext.sc().register(wordCountAcc);
        // 3. feed every element into the accumulator
        words.foreach(wordCountAcc::add);
        // 4. read the merged result on the driver
        System.out.println("wcAcc = " + wordCountAcc.value());

        sparkContext.close();
    }
}

/**
 * 自定义数据累加器：WordCount
 * 1. 继承AccumulatorV2, 定义泛型
 * IN : 累加器输入的数据类型 String
 * OUT : 累加器返回的数据类型 mutable.Map[String, Int]
 * 2. 重写方法（6）
 */
/**
 * Custom WordCount accumulator.
 *
 * <p>Extends {@code AccumulatorV2} with IN = {@code String} (a single word) and
 * OUT = {@code Map<String, Integer>} (word -> occurrence count). Spark calls
 * {@link #add} on executors and {@link #merge} on the driver to combine the
 * per-partition results.
 */
class MyAccumulator extends AccumulatorV2<String, Map<String, Integer>> {
    private final Map<String, Integer> wcMap = new HashMap<>();

    /** An accumulator is "zero" when no word has been counted yet. */
    @Override
    public boolean isZero() {
        return wcMap.isEmpty();
    }

    /**
     * Returns a copy carrying the current counts. The AccumulatorV2 contract
     * requires {@code copy()} to preserve the accumulated value (the default
     * {@code copyAndReset()} depends on it); the previous implementation
     * returned an empty accumulator, silently dropping state.
     */
    @Override
    public AccumulatorV2<String, Map<String, Integer>> copy() {
        MyAccumulator copied = new MyAccumulator();
        copied.wcMap.putAll(this.wcMap);
        return copied;
    }

    /** Clears all counts, returning the accumulator to its zero state. */
    @Override
    public void reset() {
        wcMap.clear();
    }

    /** Counts one occurrence of {@code v} (executor-side). */
    @Override
    public void add(String v) {
        wcMap.merge(v, 1, Integer::sum);
    }

    /**
     * Merges another accumulator's counts into this one (driver-side).
     * Bug fix: the old remapping function {@code (oldVal, one) -> oldVal + 1}
     * ignored the incoming count {@code v}, so merging e.g. {hello=3} into
     * {hello=2} produced 3 instead of 5. {@code Integer::sum} adds the counts.
     */
    @Override
    public void merge(AccumulatorV2<String, Map<String, Integer>> other) {
        other.value().forEach((word, count) -> wcMap.merge(word, count, Integer::sum));
    }

    /**
     * Returns the accumulated word counts.
     * NOTE(review): exposes the internal mutable map (kept for backward
     * compatibility); callers should treat it as read-only.
     */
    @Override
    public Map<String, Integer> value() {
        return wcMap;
    }
}
