package ontime

import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala._

/**
  * @author dinghao 
  * @create 2021-07-28 13:19 
  * @message
  *         -s hdfs://namenode:9000/flink/checkpoints/467e17d2cc343e6c56255d222bae3421/chk-56/_metadata
  *
dd 7324234234 2
dd 7324111234 1
dd 7324646734 6
dd 7324546734 5
dd 7324334234 3
dd 7324411234 4
  */
/**
  * Flink streaming job: reads whitespace-separated records from a socket,
  * keys them by the first field, and maintains a per-key running sum in
  * checkpointed keyed state (so the sum survives restarts from a checkpoint).
  *
  * Usage: Checkflink &lt;socket-host&gt; &lt;checkpoint-uri&gt;
  */
object Checkflink {
  def main(args: Array[String]): Unit = {
    // Fail fast with a clear usage message instead of an opaque
    // ArrayIndexOutOfBoundsException when arguments are missing.
    require(args.length >= 2, "usage: Checkflink <socket-host> <checkpoint-uri>")

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    System.setProperty("HADOOP_USER_NAME", "work")
    // args(1) is the checkpoint directory URI, e.g. hdfs://namenode:9000/flink/checkpoints
    env.setStateBackend(new FsStateBackend(args(1)))

    // --- Checkpointing configuration ---
    // Exactly-once processing semantics (this is the default mode).
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // Trigger a checkpoint every 1000 ms.
    env.getCheckpointConfig.setCheckpointInterval(1000)
    // Retain checkpoint data after the job is cancelled, so the job can later be
    // restored from it (e.g. flink run -s <checkpoint-path>/_metadata ...).
    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
    // Require at least 500 ms between the end of one checkpoint and the start of the next.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)
    // A checkpoint must complete within one minute or it is discarded.
    env.getCheckpointConfig.setCheckpointTimeout(60000)
    // Allow only one checkpoint in flight at a time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    env.setParallelism(1)

    // Read lines "key id value" from args(0):9999 and key by the first field.
    // NOTE(review): assumes each line has at least 3 space-separated tokens and
    // that the 3rd token is an integer; malformed input fails the job — confirm
    // whether bad lines should instead be filtered out upstream.
    val keyed = env.socketTextStream(args(0), 9999)
      .map { line =>
        val fields = line.split(" ")
        (fields(0), fields(1), fields(2))
      }
      .keyBy(_._1)

    // Per-key running sum of the 3rd field, held in Flink keyed state.
    // NOTE(review): the state is seeded with 1 (getOrElse(1)), so the first sum
    // for a key starts at 1 rather than 0 — confirm this offset is intentional.
    keyed.mapWithState[(String, String, String), Int] {
      case (record: (String, String, String), state: Option[Int]) =>
        val acc = state.getOrElse(1)
        println("buffer>>>>" + acc)
        (record, Option(record._3.toInt + acc))
    }.print("====================>")

    env.execute("checkflink")
  }
}
