package com.atguigu0.core

import org.apache.spark.rdd.RDD
import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description: Accumulator example: each executor works on its own local copy of the
 *               accumulator, and the per-task updates are merged on the driver.
 * @time: 2020/6/15 10:38
 * @author: baojinlong
 **/
object AccumulationTest {
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("myWordCount").setMaster("local[*]")
    // Create the SparkContext; transformations are lazy and do not trigger a job by themselves.
    val sc: SparkContext = new SparkContext(sparkConf)
    // A plain `var sum = 0` would NOT work here: the closure (and the variable) is
    // serialized to each executor, so driver-side `sum` would never see the increments.
    // A LongAccumulator is merged back to the driver after each task completes.
    val sum: LongAccumulator = sc.longAccumulator("sum")
    val numRdd: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4), 2)
    // Keep the transformation pure. Incrementing the accumulator inside `map` is a
    // known pitfall: Spark guarantees exactly-once accumulator updates only for
    // updates performed inside ACTIONS. If this RDD were recomputed (a second
    // action, or a retried task), a `map`-side add would over-count.
    val numToOne: RDD[(Int, Int)] = numRdd.map(x => (x, 1))

    // Perform the accumulator update inside the `foreach` action instead.
    numToOne.foreach { pair =>
      sum.add(1)
      println(pair)
    }
    println("*********")
    // `sum.value` may only be read on the driver; prints "sum=4".
    println("sum=" + sum.value)

    // Shut down the SparkContext and release its resources.
    sc.stop()
  }
}
