package com.atguigu.flink.chapter06.checkpoint;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.util.Collector;

import java.time.Duration;

import static org.apache.flink.streaming.api.CheckpointingMode.EXACTLY_ONCE;
import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

public class KafkaFlinkKafkaDemo {

    /**
     * End-to-end exactly-once demo: reads lines from Kafka topic {@code s1},
     * performs a keyed word count, and writes {@code word_count} strings to
     * Kafka topic {@code s2} using two-phase-commit transactions.
     *
     * <p>Checkpointing is configured with a RocksDB incremental state backend,
     * HDFS checkpoint storage, and unaligned-checkpoint fallback. A second
     * sink deliberately throws on any record containing {@code "x"} so that
     * failure/recovery behavior can be observed.
     *
     * @param args unused
     * @throws Exception if job submission or execution fails (propagated to
     *                   the JVM instead of being swallowed)
     */
    public static void main(String[] args) throws Exception {
        // Impersonate this user for HDFS access (checkpoint storage below).
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000);
        // Bug fix: the Configuration was previously discarded because
        // getExecutionEnvironment() was called without it; pass it so the
        // local web UI actually binds to port 2000.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        // 1. State backend: RocksDB with incremental checkpoints enabled.
        //env.setStateBackend(new HashMapStateBackend());
        env.setStateBackend(new EmbeddedRocksDBStateBackend(true));
        //env.setStateBackend(new EmbeddedRocksDBStateBackend()); // full checkpoints

        // Generic log-based (changelog) incremental checkpoints.
        env.enableChangelogStateBackend(true);

        // 2. Enable checkpointing with a 3 s interval.
        env.enableCheckpointing(3000);

        // 3. Checkpoint storage location on HDFS.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");

        // 4. At most one checkpoint in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // 5. Minimum pause between checkpoints (ms). Setting this makes
        //    setMaxConcurrentCheckpoints redundant.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);

        // 6. Keep externalized checkpoints when the job is cancelled.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        // 7. Checkpointing mode: exactly-once.
        env.getCheckpointConfig().setCheckpointingMode(EXACTLY_ONCE);

        // 8. Unaligned checkpoints: start aligned, fall back to unaligned if
        //    alignment takes longer than the timeout below.
        //env.getCheckpointConfig().setForceUnalignedCheckpoints(true); // force unaligned
        env.getCheckpointConfig().enableUnalignedCheckpoints();
        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ofSeconds(5));

        // 9. Abort a checkpoint if it does not complete within 10 s.
        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);

        // 10. Tolerated consecutive checkpoint failures.
        //env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);

        // 11. Job restart strategy.
        //env.setRestartStrategy(RestartStrategies.noRestart());               // never restart
        //env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000)); // fixed delay

        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("hadoop102:9092")
                .setTopics("s1")
                .setGroupId("atguigu")
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Word count: split lines on spaces, count per word, format "word_count".
        SingleOutputStreamOperator<String> stream = env
                .fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source")
                .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
                    @Override
                    public void flatMap(String value,
                                        Collector<Tuple2<String, Long>> out) throws Exception {
                        String[] words = value.split(" ");
                        for (String word : words) {
                            out.collect(Tuple2.of(word, 1L));
                        }
                    }
                })
                .keyBy(t -> t.f0)
                .sum(1)
                .map(t -> t.f0 + "_" + t.f1);

        KafkaSink<String> sink = KafkaSink.<String>builder()
                .setBootstrapServers("hadoop102:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.<String>builder()
                                .setTopic("s2")
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build()
                )
                // Exactly-once delivery via Kafka two-phase-commit transactions.
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                .setTransactionalIdPrefix("atguigu2-")
                // Brokers reject transaction timeouts above 15 minutes by default.
                .setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "")
                .build();

        stream.sinkTo(sink);

        // Deliberate failure injection: any record containing "x" crashes the
        // task so that checkpoint recovery / restart behavior can be observed.
        stream.addSink(new SinkFunction<String>() {
            @Override
            public void invoke(String value,
                               Context context) throws Exception {
                if (value.contains("x")) {
                    Thread.sleep(1000);
                    throw new RuntimeException("异常发生........");
                }
            }
        });

        // Propagate failures instead of swallowing them with printStackTrace().
        env.execute();
    }
}

//public class KafkaFlinkKafkaDemo {
//    public static void main(String[] args) {
//        System.setProperty("HADOOP_USER_NAME","atguigu");
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
//        env.setParallelism(1);
//
//        //1、设置状态后端
//        env.setStateBackend(new HashMapStateBackend());
////        env.setStateBackend(new EmbeddedRocksDBStateBackend(true)); //开启rocksDb状态后端
//
//        env.enableChangelogStateBackend(true);
//        //2、设置 checkpoint ,周期为3s
//        env.enableCheckpointing(3000);
//
//        //3、设置checkpoint的存储目录
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");
//
//        //4、设置checkpoint的并发数
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
//
//        //5、设置两个checkpoint之间的最小间隔，单位毫秒，如果设置这个，则setMaxConcurrentCheckpoint无需设置
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
//
//        //6、设置checkpoint 当程序被cancel的时候的保留策略
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
//
//        //7、设置chekpoint的mode
//        env.getCheckpointConfig().setCheckpointingMode(EXACTLY_ONCE);
//
//
//        //8、开启非对称检查点，实现严格一次
//        //env.getCheckpointConfig().setForceUnalignedCheckpoints(true); //强迫开启非对称检查点
//        env.getCheckpointConfig().enableUnalignedCheckpoints();//默认使用对齐检查点，当超过一定时间没有完成，会自动使用非对齐检查点
//        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ofSeconds(5)); //一定时间
//
//        //9、设置检查点的超时时间
//        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);
//
//        //10、设置检查点的失败次数
//        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
//
//        //11、设置job的重启策略
//        env.setRestartStrategy(RestartStrategies.noRestart()); //不重启
//        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000)); //固定延迟的重启
//
//
//        KafkaSource<String> source = KafkaSource.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setTopics("s1")
//                .setGroupId("atguigu")
//                .setStartingOffsets(OffsetsInitializer.latest())
//                .setValueOnlyDeserializer(new SimpleStringSchema())
//                .build();
//
//        SingleOutputStreamOperator<String> stream = env
//                .fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source")
//                .flatMap(new FlatMapFunction<String, Tuple2<String,Integer>>() {
//                    @Override
//                    public void flatMap(String value,
//                                        Collector<Tuple2<String, Integer>> out) throws Exception {
//                        String[] words = value.split(" ");
//                        for (String word : words) {
//                            out.collect(Tuple2.of(word,1));
//                        }
//                    }
//                })
//                .keyBy(t -> t.f0)
//                .sum(1)
//                .map(t -> t.f0 + "_" + t.f1);
//
//        KafkaSink<String> sink = KafkaSink.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setRecordSerializer(
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("s2")
//                                .setValueSerializationSchema(new SimpleStringSchema())
//                                .build()
//                )
//                //设置严格一次：启用两个阶段提交
//                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
//                .setTransactionalIdPrefix("atguigu2-")
//                //服务器（broker）不允许失误超时时间超过15分钟
//                .setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "")
//                .build();
//
//        stream.sinkTo(sink);
//
//        stream.addSink(new SinkFunction<String>() {
//            @Override
//            public void invoke(String value,
//                               Context context) throws Exception {
//                if (value.contains("x")){
//                    Thread.sleep(1000);
//                    throw new RuntimeException("异常发生....");
//                }
//            }
//        });
//
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//    }
//}


//public class KafkaFlinkKafkaDemo {
//    public static void main(String[] args) {
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
//        env.setParallelism(1);
//
//        //1、设置状态后端
//        env.setStateBackend(new HashMapStateBackend());
////        env.setStateBackend(new EmbeddedRocksDBStateBackend(true));  //开启rocksDb状态后端
//
//
//        env.enableChangelogStateBackend(true);
//        //2、开启Checkpoint ,周期为3s
//        env.enableCheckpointing(3000);
//
//        //3、设置 Checkpoint 的存储目录
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");
//
//        //4、设置 Checkpoint的并发数
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
//
//        //5、设置两个 Checkpoint之间的最小间隔 ，单位毫秒，如果设置这个，就无需设置setMaxConcurrentCheckpoints。
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
//
//        //6、设置 Checkpoint ，当程序被cancel的时候的保留策略
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
//
//        //7、设置 Checkpoint 的mode
//        env.getCheckpointConfig().setCheckpointingMode(EXACTLY_ONCE);
//
//        //8、开启非对称检查点，实现严格一次
//        //env.getCheckpointConfig().setForceUnalignedCheckpoints(true); //强迫开启非对称检查点
//        env.getCheckpointConfig().enableUnalignedCheckpoints();  //默认使用对齐检查点。当超过一定时间没有完成，会自动使用非对齐检查点
//        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ofSeconds(5)); //一定时间
//
//        //9、设置  Checkpoint 的超时时间
//        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);
//
//        //10、设置 Checkpoint 的失败次数
//        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
//
//        //11、设置job的重启策略
//        env.setRestartStrategy(RestartStrategies.noRestart()); //不重启
//        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000)); //固定延迟的重启
//
//        KafkaSource<String> kfsource = KafkaSource.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setTopics("s1")
//                .setGroupId("atguigu")
//                .setStartingOffsets(OffsetsInitializer.latest())
//                .setValueOnlyDeserializer(new SimpleStringSchema())
//                .build();
//
//        SingleOutputStreamOperator<String> stream = env
//                .fromSource(kfsource, WatermarkStrategy.noWatermarks(), "kafka-source")
//                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
//                    @Override
//                    public void flatMap(String value,
//                                        Collector<Tuple2<String, Integer>> out) throws Exception {
//                        String[] words = value.split(" ");
//                        for (String word : words) {
//                            out.collect(Tuple2.of(word, 1));
//                        }
//                    }
//                })
//                .keyBy(t -> t.f0)
//                .sum(1)
//                .map(t -> t.f0 + "_" + t.f1);
//
//        KafkaSink<String> sink = KafkaSink.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setRecordSerializer(
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("s2")
//                                .setValueSerializationSchema(new SimpleStringSchema())
//                                .build()
//                )
//                //设置严格一次，启用两个阶段提交
//                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
//                .setTransactionalIdPrefix("atguigu2-")
//                //服务器（broker）不允许事务超时时间超过15分钟
//                .setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "")
//                .build();
//
//
//        stream.sinkTo(sink);
//
//        stream.addSink(new SinkFunction<String>() {
//            @Override
//            public void invoke(String value,
//                               Context ctx) throws Exception {
//                if (value.contains("x")) {
//                    Thread.sleep(1000);
//                    throw new RuntimeException("程序异常...");
//                }
//            }
//        });
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//    }
//}


//public class KafkaFlinkKafkaDemo {
//    public static void main(String[] args) {
//        //设置用户代理     ctrl + shift +u 大小写转换
//        System.setProperty("HADOOP_USER_NAME","atguigu"); // property name is case-sensitive
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
//        env.setParallelism(1);
//
//        //1、设置状态后端
//        env.setStateBackend(new HashMapStateBackend());
//        env.setStateBackend(new EmbeddedRocksDBStateBackend(true)); //开启rocksDb状态后端
//
//        env.enableChangelogStateBackend(true);
//        //2、开启 Checkpoint , 周期为3s
//        env.enableCheckpointing(3000);
//
//        //3、设置 Checkpoint的存储目录
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");
//
//        //4、设置 Checkpoint 的并发数
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
//
//        //5、设置 两个Checkpoint之间的最小间隔，单位毫秒，如果设置了这个， 就无需设置setMaxConcurrentCheckpoints了
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
//
//        //6、设置 Checkpoint  当程序被cancel的保留策略
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
//
//        //7、设置 Checkpoint 的mode
//        env.getCheckpointConfig().setCheckpointingMode(EXACTLY_ONCE);
//
//        //8、开启非对称检查点，实现严格一次
//        //env.getCheckpointConfig().setForceUnalignedCheckpoints(true); //强迫开启非对称检查点
//        env.getCheckpointConfig().enableUnalignedCheckpoints(); //默认使用对齐检查点，如果超过一定时间，就会自动使用非对齐检查点
//        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ofSeconds(5)); //一定时间
//
//        //9、设置 Checkpoint 的超时时间
//        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);
//
//        //10、设置 Checkpoint 的失败次数
//        //env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
//
//        //11、设置job的重启策略
//        //env.setRestartStrategy(RestartStrategies.noRestart()); //不重启
//        //env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000));
//
//        KafkaSource<String> source = KafkaSource.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setTopics("s1")
//                .setGroupId("atguigu")
//                .setStartingOffsets(OffsetsInitializer.latest())
//                .setValueOnlyDeserializer(new SimpleStringSchema())
//                .build();
//
//        SingleOutputStreamOperator<String> stream = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source")
//                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
//                    @Override
//                    public void flatMap(String value,
//                                        Collector<Tuple2<String, Integer>> out) throws Exception {
//                        String[] words = value.split(" ");
//                        for (String word : words) {
//                            out.collect(Tuple2.of(word, 1));
//                        }
//                    }
//                })
//                .keyBy(t -> t.f0)
//                .sum(1)
//                .map(t -> t.f0 + "_" + t.f1);
//
//
//        KafkaSink<String> sink = KafkaSink.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setRecordSerializer(
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("s2")
//                                .setValueSerializationSchema(new SimpleStringSchema())
//                                .build()
//                )
//                //设置严格一次,两个阶段的提交
//                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
//                .setTransactionalIdPrefix("atguigu2-")
//                //服务器（broker）不允许事务超时时间超过15分钟
//                .setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "")
//                .build();
//
//        stream.sinkTo(sink);
//
//
//        stream.addSink(new SinkFunction<String>() {
//            @Override
//            public void invoke(String value,
//                               Context ctx) throws Exception {
//                if (value.contains("x")){
//                    Thread.sleep(1000);
//                    throw new RuntimeException("程序异常....");
//                }
//            }
//        });
//
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//    }
//}

//public class KafkaFlinkKafkaDemo {
//    public static void main(String[] args) {
//        //设置用户代理   ctrl + shift + u
//        System.setProperty("HADOOP_USER_NAME","atguigu");
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//        env.setParallelism(1);
//
//        //1、设置状态后端
////        env.setStateBackend(new HashMapStateBackend());
//        env.setStateBackend(new EmbeddedRocksDBStateBackend(true)); //开启RocksDb状态后端
//
//
//        env.enableChangelogStateBackend(true);
//        //2、开启 Checkpoint ,周期为3s
//        env.enableCheckpointing(3000);
//
//        //3、设置 Checkpoint 的存储目录
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck");
//
//        //4、设置 Checkpoint 的并发数
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
//
//        //5、设置两个 Checkpoint 之间的最小间隔，单位毫秒，如果设置了这个，就不需要设置  setMaxConcurrentCheckpoints 了。
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
//
//        //6、设置 Checkpoint ，当程序被cancel之后的保留策略
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
//
//        //7、设置 Checkpoint 的mode
//        env.getCheckpointConfig().setCheckpointingMode(EXACTLY_ONCE);
//
//        //8、开启非对称检查点，实现严格一次
////        env.getCheckpointConfig().setForceUnalignedCheckpoints(true); //强迫使用非对齐检查点
//        env.getCheckpointConfig().enableUnalignedCheckpoints(); //默认使用对齐检查点，如果超过一定的时间，就会自动使用非对齐检查点
//        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ofSeconds(5));
//
//        //9、设置 Checkpoint 的超时时间
//        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);
//
//        //10、设置 Checkpoint 的失败次数
//        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
//
//        //11、设置 job 的重启策略
//        env.setRestartStrategy(RestartStrategies.noRestart()); //不重启
//        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000)); //固定的延迟时间
//
//
//        KafkaSource<String> source = KafkaSource.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setTopics("s1")
//                .setGroupId("atguigu")
//                .setStartingOffsets(OffsetsInitializer.latest())
//                .setValueOnlyDeserializer(new SimpleStringSchema())
//                .build();
//
//        SingleOutputStreamOperator<String> stream = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source")
//                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
//                    @Override
//                    public void flatMap(String value,
//                                        Collector<Tuple2<String, Integer>> out) throws Exception {
//                        String[] words = value.split(" ");
//                        for (String word : words) {
//                            out.collect(Tuple2.of(word, 1));
//                        }
//                    }
//                })
//                .keyBy(t -> t.f0)
//                .sum(1)
//                .map(t -> t.f0 + "_" + t.f1);
//
//
//        KafkaSink<String> sink = KafkaSink.<String>builder()
//                .setBootstrapServers("hadoop102:9092")
//                .setRecordSerializer(
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("s2")
//                                .setValueSerializationSchema(new SimpleStringSchema())
//                                .build()
//                )
//                //设置严格一次：两个阶段事务提交
//                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
//                .setTransactionalIdPrefix("atguigu2-")
//                //服务器（broker）不允许事务 超时 时间 超过 15 分钟
//                .setProperty("transaction.timeout.ms",15 * 60 * 1000 + "")
//                .build();
//
//        stream.sinkTo(sink);
//
//
//        stream.addSink(new SinkFunction<String>() {
//            @Override
//            public void invoke(String value,
//                               Context ctx) throws Exception {
//                if (value.contains("x")){
//                    Thread.sleep(1000);
//                    throw new RuntimeException("程序异常.......");
//                }
//            }
//        });
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//
//
//    }
//}