package com.shujia.spark.core

import java.lang

import org.apache.spark.rdd.RDD
import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates Spark `LongAccumulator` usage.
  *
  * Accumulator contract (write-only shared variable):
  *   - defined on the Driver
  *   - added to on the Executors
  *   - read back only on the Driver
  */
object Demo21Accumulator {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("accumulator")
      .setMaster("local")

    val sc = new SparkContext(conf)

    try {
      val listRDD: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9))

      // 1. Define the accumulator on the Driver.
      val accumulator: LongAccumulator = sc.longAccumulator

      // 2. Add to it on the Executors. `foreach` is an action, so the
      //    increments happen exactly once per element here.
      listRDD.foreach(_ => accumulator.add(1))

      // 3. Read the result back on the Driver.
      val value: lang.Long = accumulator.value

      println(s"累加次数:$value")

      /*
       * Second example: without an accumulator, counting the total number of
       * students would require launching a separate job; with one, the total
       * is computed alongside the per-class counts in a single pass.
       */
      val student: RDD[String] = sc.textFile("data/students.txt")

      // Define the student-count accumulator on the Driver.
      val stuNum: LongAccumulator = sc.longAccumulator

      val clazzRDD: RDD[(String, Int)] = student.map(line => {
        // WARNING: incrementing inside a *transformation* — Spark only
        // guarantees exactly-once accumulator updates inside actions; if this
        // stage is recomputed (task retry, re-used without caching), the count
        // may be inflated. Acceptable for a demo; use with care in production.
        stuNum.add(1)

        // Column 4 is the class name — assumes CSV rows with >= 5 fields.
        val clazz: String = line.split(",")(4)

        (clazz, 1)
      })

      val reduceByKeyRDD: RDD[(String, Int)] = clazzRDD.reduceByKey(_ + _)

      // The action below triggers the computation (and the stuNum updates).
      reduceByKeyRDD.foreach(println)

      // Read the accumulator only after an action has run, otherwise it
      // would still be 0 (transformations are lazy).
      val stuCount: lang.Long = stuNum.value

      println(s"学生总数为:$stuCount")
    } finally {
      // Release the SparkContext's resources even if the job fails.
      sc.stop()
    }
  }

}
