package cn.tedu.stream

import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.streaming.api.scala.{DataStream, KeyedStream, StreamExecutionEnvironment, WindowedStream}
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow

/**
 * @author Amos
 * @date 2022/5/22
 */

object StreamWordCount {
  /**
   * Streaming word count: reads text lines from a socket, splits them into
   * words, and prints per-word counts aggregated over 5-second windows.
   *
   * @param args optional overrides: args(0) = source host (default "hadoop01"),
   *             args(1) = source port (default 9999)
   */
  def main(args: Array[String]): Unit = {
    // Host/port come from the CLI when supplied, falling back to the original
    // hard-coded defaults so existing launch commands keep working.
    val host = if (args.length > 0) args(0) else "hadoop01"
    val port = if (args.length > 1) args(1).toInt else 9999

    // 1. Build the stream execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // 2. Source: one text line per socket message, e.g. "hello flink hbase flume".
    val source: DataStream[String] = env.socketTextStream(host, port)

    // 3. Transformations (the implicit TypeInformation instances needed by the
    // Scala DataStream API come from this import).
    import org.apache.flink.api.scala._
    val words: DataStream[String] = source.flatMap(_.split(" "))
    val wordAndOne: DataStream[(String, Int)] = words.map((_, 1))
    // Key by the word itself. A key-selector lambda replaces the deprecated
    // positional keyBy(0), which produced an opaque Tuple key type.
    val keyedStream: KeyedStream[(String, Int), String] = wordAndOne.keyBy(_._1)
    //    // (1) Unbounded (continuous) aggregation alternative:
    //    val result: DataStream[(String, Int)] = keyedStream.sum(1)
    // (2) Windowed aggregation: emit one count per key every 5 seconds.
    // NOTE(review): timeWindow is deprecated since Flink 1.12; prefer
    // .window(TumblingProcessingTimeWindows.of(Time.seconds(5))) when upgrading.
    val windowStream: WindowedStream[(String, Int), String, TimeWindow] = keyedStream.timeWindow(Time.seconds(5))
    val result: DataStream[(String, Int)] = windowStream.sum(1)

    // 4. Sink: print the running counts to stdout.
    result.print()
    // execute() is mandatory for streaming jobs — nothing runs without it.
    env.execute("StreamWordCount")
  }

}
