package com.study.flink.dataset

import org.apache.flink.api.common.accumulators.LongCounter
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.configuration.Configuration
import org.apache.flink.core.fs.FileSystem.WriteMode

/**
  * Demonstrates Flink accumulators: a [[org.apache.flink.api.common.accumulators.LongCounter]]
  * registered inside a RichMapFunction on the batch DataSet API, with the merged
  * result read back from the JobExecutionResult.
  *
  * @author stephen
  * @create 2019-05-26 23:30
  * @since 1.0.0
  */
object FlinkAccumulatorDemo {

  def main(args: Array[String]): Unit = {
    // 1. Set up the batch execution environment.
    val env = ExecutionEnvironment.getExecutionEnvironment

    // 2. Define the data source. The implicit TypeInformation instances from
    //    the scala package object are required by fromElements.
    import org.apache.flink.api.scala._
    // Renamed from `dataStream`: this is the DataSet (batch) API, not DataStream.
    val dataSet = env.fromElements("hadoop", "spark", "storm", "flink")

    // 3. Transform the data, counting every processed element with an accumulator.
    val resultSet = dataSet.map(new RichMapFunction[String, String] {

      // step 1: define the counter (one instance per parallel task; Flink
      // merges all task-local counters registered under the same name)
      val counter = new LongCounter()

      override def open(parameters: Configuration): Unit = {
        // step 2: register the counter with the runtime context
        getRuntimeContext.addAccumulator("counter", counter)
      }

      override def map(in: String): String = {
        // step 3: count the element; the map itself is the identity function
        counter.add(1)
        in
      }
    })

    // 4. Specify the output location. NOTE(review): hard-coded local path — fine
    //    for a demo, but parameterize for real use.
    resultSet.writeAsText("/Users/stephen/output", WriteMode.OVERWRITE).setParallelism(3)

    // 5. Execute the job, then fetch the merged accumulator value from the result.
    // step 4: read the counter
    val executionResult = env.execute("Flink Accumulator Demo")
    val num = executionResult.getAccumulatorResult[Long]("counter")
    println(s"num = $num")
  }
}
