package com.atguigu.bigdata.spark


import java.util

import org.apache.spark.util.AccumulatorV2
import org.apache.spark.{SparkConf, SparkContext}


// Custom accumulator demo: collect words containing the letter "h"
object Spark02_Accumulator16{

  def main(args: Array[String]): Unit = {
    // Configure the Spark runtime: local mode using all available cores.
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("wordCount")
    // Entry point for all Spark functionality.
    val sc = new SparkContext(conf)

    // Sample data spread over two partitions.
    val dataRDD = sc.makeRDD(List("hadoop", "hive", "hbase", "scale", "spark"), 2)

    // TODO Create the custom accumulator.
    val accumulator = new WordAccumulator
    // TODO Register it with the context so executor-side partial results
    // are merged back to the driver.
    sc.register(accumulator)

    // Feed every element into the accumulator on the executors.
    dataRDD.foreach(word => accumulator.add(word))

    // Read the merged result on the driver.
    println(accumulator.value)
    sc.stop()
  }

  /**
   * Custom accumulator that collects every input word containing the
   * letter "h" into a `java.util.ArrayList`.
   *
   * IN = String (a single word), OUT = util.ArrayList[String].
   */
  class WordAccumulator extends AccumulatorV2[String, util.ArrayList[String]] {
    // Buffer of accumulated words (local to this driver/executor copy).
    val list = new util.ArrayList[String]()

    // True only while nothing has been accumulated yet (zero state).
    override def isZero: Boolean = {
      list.isEmpty
    }

    // Per the AccumulatorV2 contract, copy() must return a copy carrying the
    // CURRENT value (copyAndReset() is the zero-value variant, and its default
    // implementation calls copy() then reset()). The previous implementation
    // returned an empty accumulator, so any direct copy() caller saw a wrong,
    // empty value — fixed by copying the list contents over.
    override def copy(): AccumulatorV2[String, util.ArrayList[String]] = {
      val newAcc = new WordAccumulator()
      newAcc.list.addAll(list)
      newAcc
    }

    // Reset this accumulator back to its zero (empty) state.
    override def reset(): Unit = {
      list.clear()
    }

    // Add one element; only words containing the letter "h" are kept.
    override def add(v: String): Unit = {
      if (v.contains("h")) {
        list.add(v)
      }
    }

    // Merge a partial result from another partition into this accumulator.
    override def merge(other: AccumulatorV2[String, util.ArrayList[String]]): Unit = {
      list.addAll(other.value)
    }

    // The accumulated value as seen on the driver.
    override def value: util.ArrayList[String] = list
  }
}
