package spark.core.scala

import org.apache.spark.{SparkContext, SparkConf}

/**
 * Created by ibf on 2018/2/4.
 * Basic Spark accumulator demo: a word count that simultaneously tracks
 * the number of input records, the number of output records, and the
 * number of result partitions via named accumulators.
 */
object AccumulatorsWC {
  def main(args: Array[String]): Unit = {
    // 1. Build the Spark context (local mode, all cores).
    val conf = new SparkConf()
      .setAppName("AccumulatorsWC")
      .setMaster("local[*]")
    val sc = SparkContext.getOrCreate(conf)

    // 2. Build the input RDD with 3 partitions.
    //    The empty string and the null entry are included deliberately to
    //    exercise the cleaning logic below.
    val rdd = sc.parallelize(Array(
      "hadoop,spark,hbase",
      "spark,spark,hbase",
      "hadoop,spark,hadoop",
      "",
      "hadoop,spark,hbase",
      "hive,spark,hbase",
      "hue,spark,beeline",
      "flume,spark,hbase",
      "oozie,hue,hbase",
      null,
      "hue,spark,kafka"
    ), 3)

    // Requirement: implement word count while also counting the number of
    // input records and the number of final output records (analogous to
    // counter.increment() in Hadoop MapReduce).
    // 3. Create named accumulators for the counts.
    // NOTE(review): SparkContext.accumulator is deprecated since Spark 2.0;
    // prefer sc.longAccumulator(name) if this project runs on Spark 2.x.
    val inputRecords = sc.accumulator(0, "Input Record Size")
    val partitionNumbers = sc.accumulator(0, "Partition Numbers Size")
    val outputRecords = sc.accumulator(0, "Output Record Size")

    // 4. Word count.
    // CAVEAT: accumulators updated inside a *transformation* (this flatMap)
    // can be over-counted if a task is retried or a stage is recomputed;
    // only accumulator updates made inside *actions* are applied exactly once.
    val result = rdd.flatMap { line =>
      inputRecords += 1
      // Guard against null input lines before splitting.
      val nline = if (line == null) "" else line
      nline.split(",")
        // Convert to key-value pairs for the reduce step.
        .map(word => (word.trim, 1))
        // Keep only non-empty words (drops blanks from "" and trailing commas).
        .filter(_._1.nonEmpty)
    }.reduceByKey(_ + _)

    // 5. Output the results.
    /**
     * foreachPartition hands each partition's data to the closure as one
     * iterator, so an accumulator incremented once per closure invocation
     * counts partitions, not records.
     */
    result.foreachPartition { iter =>
      // One increment per partition.
      partitionNumbers += 1
      iter.foreach { t =>
        // One increment per output record (safe: this is inside an action).
        outputRecords += 1
        println(t)
      }
    }

    // 6. Read the accumulator values on the driver.
    println(s"Input Record Size:${inputRecords.value}")
    println(s"Output Record Size:${outputRecords.value}")
    println(s"Partition Numbers Size:${partitionNumbers.value}")

    // 7. Keep the application alive so the 4040 web UI can be inspected.
    //    (Uppercase L suffix: lowercase 'l' is deprecated and easily misread as '1'.)
    Thread.sleep(1000000L)
    // Release Spark resources once inspection is over.
    sc.stop()
  }
}
