package com.shengzai.flink.tf

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.streaming.api.scala._

object Demo5Reduce {

  /**
   * Word-count demo showing the two ways to express `reduce` on a
   * KeyedStream: a Scala lambda and a Java-style ReduceFunction.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Read the source file line by line.
    val lines: DataStream[String] = env.readTextFile("data/words.txt")

    // Split each comma-separated line into words and pair each word with 1.
    val wordCounts: DataStream[(String, Int)] = lines
      .flatMap(line => line.split(","))
      .map(word => (word, 1))

    // Partition the stream by the word so counts accumulate per key.
    val keyed: KeyedStream[(String, Int), String] = wordCounts.keyBy(kv => kv._1)

    /**
     * reduce: aggregates elements within each key group after keyBy.
     * Variant 1 — Scala lambda.
     */
    val scalaStyleCounts: DataStream[(String, Int)] = keyed
      .reduce { (left, right) =>
        (left._1, left._2 + right._2)
      }

    //scalaStyleCounts.print()


    // Variant 2 — Java-style ReduceFunction (same semantics as the lambda above).
    val javaStyleCounts: DataStream[(String, Int)] = keyed
      .reduce(new ReduceFunction[(String, Int)] {
        override def reduce(left: (String, Int), right: (String, Int)): (String, Int) =
          (left._1, left._2 + right._2)
      })
    javaStyleCounts.print()

    // Trigger execution of the streaming job.
    env.execute()

  }
}
