package com.gitee.etl.app

import com.gitee.etl.process.LogDataEtl
import com.gitee.filter.IPSum
import com.gitee.utils.GlobalConfigUtil
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}

object App {

  /**
   * Entry point: configures the Flink streaming environment (checkpointing,
   * state backend, restart strategy), registers the IP database in the
   * distributed cache, wires up the log ETL and IP-recognition jobs, and
   * submits the job graph for execution.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Trigger a checkpoint every 5 seconds.
    env.enableCheckpointing(5000)

    val checkpointConfig = env.getCheckpointConfig
    // Exactly-once processing guarantees for checkpointed state.
    checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // Allow only a single checkpoint to be in flight at any time.
    checkpointConfig.setMaxConcurrentCheckpoints(1)
    // Keep externalized checkpoints after job termination:
    //   DELETE_ON_CANCELLATION — state is dropped on cancel, kept on failure.
    //   RETAIN_ON_CANCELLATION — state is kept on both cancel and failure.
    // Since RETAIN is used here, old checkpoint state accumulates on HDFS.
    checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // Persist checkpoint state to HDFS.
    env.setStateBackend(new FsStateBackend("hdfs://node01:9000/flink/checkpoint"))

    // On failure, restart up to 5 times with a 5000 ms delay between attempts.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000))

    // Smoke-test pipeline, kept for reference:
    //env.fromElements("spark", "flink", "mapreduce").map((_, 1)).keyBy(0).sum(1).print()

    // Ship the IP geolocation database to every task via the distributed cache.
    env.registerCachedFile(GlobalConfigUtil.`ip.file.path`, "ip")

    // ETL: load user log data into the DW layer.
    new LogDataEtl(env).process()

    // IP filtering / recognition.
    new IPSum(env).recognize()

    env.execute("oppose_guagua")
  }

}
