package com.fwmagic.flink.state.source;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import java.util.Properties;

/**
 * Demo job: consume a Kafka topic with checkpointing enabled, plus a socket
 * source that lets you trigger a failure by hand to observe checkpoint restore.
 *
 * <p>Optional CLI args (all default to the original demo values):
 * {@code [socketHost] [socketPort] [bootstrapServers] [topic]}
 */
public class KafkaSourceDemo {
    public static void main(String[] args) throws Exception {
        // Optional overrides via command line; defaults preserve the original setup.
        final String socketHost = args.length > 0 ? args[0] : "localhost";
        final int socketPort = args.length > 1 ? Integer.parseInt(args[1]) : 8888;
        final String bootstrapServers = args.length > 2 ? args[2] : "localhost:9092";
        final String topic = args.length > 3 ? args[3] : "test11";

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setParallelism(3);

        // Take a checkpoint every 5 seconds.
        env.enableCheckpointing(5000);

        // Exactly-once is already the default mode; set explicitly for clarity.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Retain checkpoint data after the job is cancelled, so the job can later
        // be restored from a chosen checkpoint instead of the data being deleted.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Persist checkpoint state to the local filesystem.
        env.setStateBackend(new FsStateBackend("file:///Users/fangwei/Downloads/chk"));

        // Socket source used to trigger an exception manually: typing "err" into the
        // socket throws, forcing a restart and a restore from the latest checkpoint.
        env.socketTextStream(socketHost, socketPort).map(new MapFunction<String, String>() {
            @Override
            public String map(String value) throws Exception {
                if (value.equals("err")) {
                    throw new RuntimeException("======>程序出异常了！");
                }
                return value;
            }
        }).print();

        Properties prop = new Properties();
        prop.setProperty("bootstrap.servers", bootstrapServers);
        prop.setProperty("group.id", "gp11");

        FlinkKafkaConsumer011<String> kafkaSource =
                new FlinkKafkaConsumer011<>(topic, new SimpleStringSchema(), prop);

        /*
         * Defaults to true: after each completed checkpoint the consumer also writes
         * its offsets to Kafka's __consumer_offsets topic. On restart, Flink first
         * looks for offsets in the checkpoint and only falls back to
         * __consumer_offsets when none are found. Disabled here so offsets live
         * exclusively in Flink's checkpoints.
         */
        kafkaSource.setCommitOffsetsOnCheckpoints(false);
        DataStreamSource<String> streamSource = env.addSource(kafkaSource);

        streamSource.print();

        env.execute("KafkaSourceDemo");
    }
}
