package com.gitee.window

import java.util.Properties

import com.gitee.utils.GlobalConfigUtil
import com.gitee.window.connect.KafkaSource
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment, _}
import org.apache.flink.streaming.api.windowing.assigners.{EventTimeSessionWindows, TumblingEventTimeWindows}
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.util.Collector
import org.apache.kafka.clients.CommonClientConfigs
import sun.awt.TimedWindowEvent

/**
  * 演示模拟各个路口通过红绿灯汽车的数量
  * 需求
  * 每5秒钟统计一次，在这过去的5秒钟内，各个路口通过红绿灯汽车的数量--滚动窗口
  * 每5秒钟统计一次，在这过去的10秒钟内，各个路口通过红绿灯汽车的数量--滑动窗口
  * 会话窗口(需要事件时间支持):在30秒内无数据接入则触发窗口计算
  */
object StreamingWindow {

  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // ---- Checkpoint configuration ----
    // Trigger a checkpoint every 5 seconds.
    env.enableCheckpointing(5000)

    // Exactly-once state consistency for checkpoints.
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

    // Persist checkpoint state to HDFS.
    env.setStateBackend(new FsStateBackend("hdfs://node01:8700/flink/checkpoi"))

    // Allow at most one checkpoint in flight at a time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // Externalized-checkpoint cleanup policy:
    //   DELETE_ON_CANCELLATION - state is kept on abnormal termination but dropped when the job is cancelled
    //   RETAIN_ON_CANCELLATION - state is kept in both cases (so old checkpoints accumulate on HDFS)
    env.getCheckpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // On failure: up to 5 restart attempts, 5000 ms apart.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000))

    // Source records are CSV pairs: "trafficLightId,carCount".
    val rawStream: DataStream[String] =
      env.addSource(KafkaSource.getKafkaStreamData(GlobalConfigUtil.`kafka.ods.car`))

    // Parse each record into (trafficLightId, count).
    // NOTE(review): assumes every record is well-formed — a malformed line would fail the job; confirm upstream guarantees.
    val perLight: DataStream[(String, Int)] = rawStream.map { line =>
      val fields: Array[String] = line.split(",")
      (fields(0), fields(1).toInt)
    }

    // ---- Alternative window choices, kept for reference ----
    // By default a window's slide equals its length, so tumbling windows need no slide argument;
    // computation fires on the slide interval, while the length defines the span being aggregated.
    //   tumbling: perLight.keyBy(0).timeWindow(Time.seconds(10)).sum(1)
    //   sliding:  perLight.keyBy(0).timeWindow(Time.seconds(100), Time.seconds(5)).sum(1)
    //   session (event time required; fires after 30 s without data, waits forever if no
    //   event time is assigned):
    //     perLight.keyBy(0).window(EventTimeSessionWindows.withGap(Time.seconds(30))).sum(1)
    //   event-time tumbling: perLight.keyBy(0).window(TumblingEventTimeWindows.of(Time.seconds(5))).sum(1)
    //   count window: perLight.keyBy(0).countWindow(5).sum(1)

    // Custom window function over a 5-second tumbling window.
    // WindowFunction type parameters: [IN, OUT, KEY, W]
    val windowed: WindowedStream[(String, Int), Tuple, TimeWindow] =
      perLight.keyBy(0).timeWindow(Time.seconds(5))
    val totals: DataStream[(String, Int)] = windowed.apply(new WindowFunction[(String, Int), (String, Int), Tuple, TimeWindow] {
      override def apply(key: Tuple, window: TimeWindow, input: Iterable[(String, Int)], out: Collector[(String, Int)]): Unit = {
        // Sum the counts seen in this window, keeping the traffic-light id.
        // (Flink only fires non-empty windows, so `head` is safe here.)
        val lightId: String = input.head._1
        val windowTotal: Int = input.map(_._2).sum
        out.collect((lightId, windowTotal))
      }
    })

    totals.print()
    env.execute("car_sum")
  }

}
