package com.shujia.kafka

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

import java.util.Properties

/**
 * Demo: exactly-once word count over a Kafka topic.
 *
 * Reads comma-separated words from the Kafka topic "exactly", counts them
 * with keyed state, and prints the running totals. Checkpointing is enabled
 * in EXACTLY_ONCE mode with state stored on HDFS, so offsets and counts are
 * restored consistently after a failure.
 */
object Demo02FlinkOnKafkaExactlyOnce {

  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    configureCheckpointing(env)

    // Register the Kafka consumer as an unbounded source
    val lines: DataStream[String] = env.addSource(buildKafkaSource())

    // Word count over comma-separated input lines
    lines
      .flatMap(line => line.split(","))
      .map(word => (word, 1))
      .keyBy(pair => pair._1)
      .sum(1)
      .print()

    env.execute()
  }

  /** Enables exactly-once checkpointing with an HDFS-backed state backend. */
  private def configureCheckpointing(env: StreamExecutionEnvironment): Unit = {
    // Trigger a checkpoint every 20 seconds
    env.enableCheckpointing(20000)

    val checkpointConf = env.getCheckpointConfig
    // EXACTLY_ONCE is the default mode; set explicitly for clarity
    checkpointConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // Require at least 500 ms between the end of one checkpoint and the start of the next
    checkpointConf.setMinPauseBetweenCheckpoints(500)
    // Abort any checkpoint that takes longer than one minute
    checkpointConf.setCheckpointTimeout(60000)
    // Allow at most one checkpoint in flight at a time
    checkpointConf.setMaxConcurrentCheckpoints(1)
    // Keep checkpoint data when the job is cancelled, so it can be restored manually
    checkpointConf.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // Persist checkpointed state to HDFS
    val backend: StateBackend = new FsStateBackend("hdfs://master:9000/flink/wc/checkpoint")
    env.setStateBackend(backend)
  }

  /** Builds a consumer for the "exactly" topic, starting from the earliest offsets. */
  private def buildKafkaSource(): FlinkKafkaConsumer[String] = {
    val props = new Properties()
    // Kafka broker cluster addresses
    props.setProperty("bootstrap.servers", "master:9092,node1:9092,node2:9092")
    // Consumer group id
    props.setProperty("group.id", "test")

    val consumer = new FlinkKafkaConsumer[String]("exactly", new SimpleStringSchema(), props)
    // With checkpointing enabled, restored offsets take precedence; this only
    // applies when the job starts without a checkpoint/savepoint.
    consumer.setStartFromEarliest()
    consumer
  }

}
