package com.shujia.spark.core

import java.lang

import org.apache.spark.rdd.RDD
import org.apache.spark.util.LongAccumulator
import org.apache.spark.{SparkConf, SparkContext}

object Demo21Accumulator {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("spark")

    val sc = new SparkContext(conf)

    val rdd: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9))

    /**
      * Counter-example: a plain Driver-side var is captured by the closure,
      * serialized, and shipped to the Executors. Each Executor increments its
      * own copy, so the Driver's `j` is never updated.
      */
    var j = 0

    rdd.foreach(i => {
      j += 1
    })

    println(j) // prints 0, not 9 — the Executor-side increments are lost

    /**
      * Accumulators — add-only shared variables:
      *
      * - defined on the Driver
      * - incremented on the Executors
      * - read back on the Driver, and only meaningful AFTER an action has run
      */

    // 1. Define the accumulator on the Driver
    val accumulator: LongAccumulator = sc.longAccumulator

    val rdd2: RDD[Unit] = rdd.map(i =>

      // 2. Increment on the Executor side
      accumulator.add(i)

    )

    rdd2.foreach(println)

    // 3. Read the accumulated result on the Driver.
    // Transformations are lazy: the value is only valid after an action.
    val count: lang.Long = accumulator.value

    println(count)

    /**
      * Practical use of an accumulator:
      *
      * Without an accumulator, computing the total number of students would
      * require launching a separate job. With one, the total is accumulated
      * as a side effect of the same job that computes the per-class counts.
      */

    val student: RDD[String] = sc.textFile("data/students.txt")

    // Define the accumulator on the Driver
    val studentNum: LongAccumulator = sc.longAccumulator

    val kvRDD: RDD[(String, Int)] = student.map(stu => {

      // Count every student while the class key is extracted
      studentNum.add(1)

      val clazz: String = stu.split(",")(4)
      (clazz, 1)
    })

    val clazzNumRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

    clazzNumRDD.foreach(println)

    // BUG FIX: read the accumulator only AFTER the action above has executed.
    // The original code read `studentNum.value` before any action ran, which
    // always yields 0 because no transformation had been evaluated yet.
    val stuNum: lang.Long = studentNum.value

    println(stuNum)

  }

}
