package cn.tedu.stream.window

import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.scala.{DataStream, KeyedStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector

/**
 * @author Amos
 * @date 2022/5/23
 */

object StreamApplyDemo {

  /**
   * Word-count demo driven by a raw [[WindowFunction]].
   *
   * Reads whitespace-separated words from a socket (e.g. "hello world hello flink"),
   * keys the stream by word, groups records into 5-second tumbling time windows,
   * and uses `apply` to sum the counts of each window before printing them.
   */
  def main(args: Array[String]): Unit = {
    // Streaming execution environment (entry point for the job graph).
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    import org.apache.flink.api.scala._

    // Source: raw text lines from the socket at hadoop01:9999.
    val lines: DataStream[String] = env.socketTextStream("hadoop01", 9999)

    // Tokenize into (word, 1) pairs and partition the stream by word.
    val wordPairs: KeyedStream[(String, Int), String] =
      lines
        .flatMap(_.split(" "))
        .map(word => (word, 1))
        .keyBy(_._1)

    // Tumbling 5-second time window per key.
    val windowed = wordPairs.timeWindow(Time.seconds(5))

    // Aggregate each window's contents with a full WindowFunction:
    // it receives the key, the window metadata, and ALL buffered records,
    // and emits one (word, total) pair per window firing.
    val counts = windowed.apply(new WindowFunction[(String, Int), (String, Int), String, TimeWindow] {
      override def apply(key: String,
                         window: TimeWindow,
                         input: Iterable[(String, Int)],
                         out: Collector[(String, Int)]): Unit = {
        // All records in this window share the same word; fold their counts.
        // (Flink only fires non-empty windows, so reduce is safe here.)
        val aggregated = input.reduce { case ((word, c1), (_, c2)) => (word, c1 + c2) }
        out.collect(aggregated)
      }
    })

    counts.print()
    // Lazily built pipeline runs only once execute() is called.
    env.execute()
  }

}
