package com.shujia.flink.core

import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup

object Demo1Checkpoint {

  /**
    * Demo: enabling Flink checkpointing with an HDFS-backed state backend,
    * reading from Kafka and running a streaming word count.
    *
    * Running on a cluster: edit flink-conf.yaml and set
    *   state.backend: filesystem
    *
    * When restarting the job, pass -s <checkpoint-dir> to resume from a checkpoint:
    *
    * flink run -m yarn-cluster -s hdfs://node1:9000/flink/checkpoint/17eb65f0dcebd0a484bf4d0991befd4b/chk-275 -c com.shujia.flink.core.Demo1Checkpoint flink-1.0-jar-with-dependencies.jar
    */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Start a checkpoint every 1000 ms.
    env.enableCheckpointing(1000)

    // Advanced checkpoint options:

    // Exactly-once processing guarantees (this is the default mode).
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

    // Leave at least 500 ms between the end of one checkpoint and the start of the next.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)

    // A checkpoint must complete within one minute or it is discarded.
    env.getCheckpointConfig.setCheckpointTimeout(60000)

    // Allow only one checkpoint to be in progress at a time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // Retain externalized checkpoints after the job is cancelled,
    // so the job can later be resumed from them with -s.
    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // Persist checkpoint state to an external durable file system (HDFS).
    env.setStateBackend(new FsStateBackend("hdfs://node1:9000/flink/checkpoint"))

    val kafkaProps = new Properties()
    // NOTE(review): zookeeper.connect is ignored by the universal FlinkKafkaConsumer
    // (it only uses bootstrap.servers) — kept here for compatibility; safe to drop.
    kafkaProps.setProperty("zookeeper.connect", "node2:2181,node3:2181,node1:2181")
    kafkaProps.setProperty("bootstrap.servers", "node2:9092,node3:9092,node1:9092")
    kafkaProps.setProperty("group.id", "asdasda")

    // Build a Kafka source that reads the "flink" topic as plain UTF-8 strings.
    val kafkaSource = new FlinkKafkaConsumer[String]("flink", new SimpleStringSchema, kafkaProps)

    // With no committed offsets / savepoint, read the topic from the beginning.
    kafkaSource.setStartFromEarliest()

    val ds = env.addSource(kafkaSource)

    // Word count: split each record on commas, emit (word, 1), sum per key.
    val countDS = ds
      .flatMap(_.split(","))
      .map(word => (word, 1))
      .keyBy(_._1)
      .reduce((x, y) => (x._1, x._2 + y._2))

    countDS.print()

    // Name the job explicitly so it is easy to identify in the Flink / YARN UI.
    env.execute("Demo1Checkpoint")
  }
}
