package com.shujia.flink.tf

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.streaming.api.scala._

object Demo6Reduce {

  /**
    * Demo: keyBy + reduce on a keyed stream.
    *
    * keyBy followed by reduce is the Flink equivalent of Spark's reduceByKey.
    * NOTE: unlike Spark, Flink performs no map-side pre-aggregation (no combiner);
    * every record is shipped to the key's task before the reduce runs.
    */
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Read the student file as a bounded stream of text lines.
    val lines: DataStream[String] = env.readTextFile("data/students.txt")

    // Key on column index 4 (the class field) and pair each record with a count of 1.
    val pairs: DataStream[(String, Int)] = lines.map(line => (line.split(",")(4), 1))

    val keyed: KeyedStream[(String, Int), String] = pairs.keyBy(_._1)

    // Incrementally sum the counts per key; the key component is carried through unchanged.
    // (Equivalent to the anonymous ReduceFunction form:
    //   keyed.reduce(new ReduceFunction[(String, Int)] { ... }) )
    val counts: DataStream[(String, Int)] =
      keyed.reduce((acc, next) => (acc._1, acc._2 + next._2))

    counts.print()

    // Trigger job execution; nothing above runs until this call.
    env.execute()
  }

}
