package com.shujia.flink.tf

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.streaming.api.scala._

/**
 * Demonstrates keyed aggregation with `reduce` on a Flink DataStream,
 * written once with the Scala lambda API and once with an explicit
 * Java-style [[ReduceFunction]] instance.
 */
object Demo5Reduce {
  def main(args: Array[String]): Unit = {
    // Bootstrap the streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Unbounded source: raw text lines read from a socket.
    val lines: DataStream[String] = env.socketTextStream("master", 8888)

    // Split each line on commas and pair every word with an initial count of 1.
    val wordCounts: DataStream[(String, Int)] = lines
      .flatMap(_.split(","))
      .map(word => (word, 1))

    // Partition the stream by the word itself so counts accumulate per key.
    val keyed: KeyedStream[(String, Int), String] = wordCounts.keyBy(_._1)

    /**
     * reduce: after keying, incrementally aggregates all records that share
     * the same key into a single running value.
     */
    // Variant 1: concise Scala lambda API (kept for comparison; not printed).
    val scalaStyle: DataStream[(String, Int)] = keyed
      .reduce((acc, next) => (acc._1, acc._2 + next._2))

    // Variant 2: Java-style API with an anonymous ReduceFunction.
    val javaStyle: DataStream[(String, Int)] = keyed.reduce(new ReduceFunction[(String, Int)] {
      /**
       * Invoked once per incoming record of a key.
       *
       * @param acc  the running aggregate accumulated so far
       * @param next the newly arrived record
       * @return the updated aggregate for this key
       */
      override def reduce(acc: (String, Int), next: (String, Int)): (String, Int) = {
        // Debug output so the incremental aggregation is visible at runtime.
        println(acc)
        println(next)
        (acc._1, acc._2 + next._2)
      }
    })

    javaStyle.print()

    // Lazily-built topology only runs once execute() is called.
    env.execute()
  }
}
