package com.shujia.flink.sink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic
import org.apache.flink.streaming.connectors.kafka.internals.KeyedSerializationSchemaWrapper
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer}

/**
 * Flink streaming job demonstrating an end-to-end exactly-once pipeline:
 * reads strings from Kafka topic "flink", splits them on commas, and writes
 * the pieces to Kafka topic "flink1" using a transactional (EXACTLY_ONCE)
 * producer backed by HDFS checkpoints.
 */
object Demo5SinkKafka {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Start a checkpoint every 20 seconds.
    env.enableCheckpointing(20000)

    // Advanced options:

    // Exactly-once checkpointing mode (this is the default).
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

    // Require at least 500 ms between the end of one checkpoint and the start of the next.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)

    // A checkpoint must complete within one minute or it is discarded.
    env.getCheckpointConfig.setCheckpointTimeout(60000)

    // Allow only one checkpoint to be in progress at a time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // Retain externalized checkpoints after the job is cancelled,
    // so the job can be restored from them manually.
    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // Persist checkpoint state to an external file system (HDFS).
    env.setStateBackend(new FsStateBackend("hdfs://node1:9000/flink/checkpoint"))

    // Consumer configuration.
    val kafkaProps = new Properties()
    kafkaProps.setProperty("zookeeper.connect", "node2:2181,node3:2181,node1:2181")
    kafkaProps.setProperty("bootstrap.servers", "node2:9092,node3:9092,node1:9092")
    kafkaProps.setProperty("group.id", "asdasda")

    // Build the Kafka source.
    // Produce test data with:
    //   kafka-console-producer.sh --broker-list node1:9092 --topic flink
    val kafkaSource = new FlinkKafkaConsumer[String]("flink", new SimpleStringSchema, kafkaProps)

    // Read the topic from the beginning (ignores committed offsets).
    kafkaSource.setStartFromEarliest()

    val ds = env.addSource(kafkaSource)

    // Split each comma-separated record into individual elements.
    val countDS = ds.flatMap(_.split(","))

    // Producer configuration.
    val sinProperties = new Properties()
    sinProperties.setProperty("bootstrap.servers", "node1:9092")
    // Kafka transaction timeout: 5 minutes (300000 ms).
    // BUG FIX: the original used 5 * 60 * 100 = 30000 ms (30 s), which is shorter
    // than it needs to be for EXACTLY_ONCE semantics with a 20 s checkpoint
    // interval — any checkpoint finishing late would have its transaction
    // aborted by the broker. Must also stay <= the broker's
    // transaction.max.timeout.ms (default 15 minutes).
    sinProperties.setProperty("transaction.timeout.ms", (5 * 60 * 1000).toString)

    // Consume the output with:
    //   kafka-console-consumer.sh --zookeeper node1:2181,node2:2181,node3:2181 --from-beginning --topic flink1
    val kafkaProducer = new FlinkKafkaProducer[String](
      "flink1", // target topic
      new KeyedSerializationSchemaWrapper[String](new SimpleStringSchema),
      sinProperties,
      Semantic.EXACTLY_ONCE // transactional writes, committed on checkpoint completion
    )

    // Sink the stream to Kafka.
    countDS.addSink(kafkaProducer)

    env.execute()
  }
}
