package com.atguigu.flink.chapter06.ckeckpoint;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.util.Collector;

import java.time.Duration;

import static org.apache.flink.streaming.api.CheckpointingMode.EXACTLY_ONCE;
import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

/**
 * @Author lzc
 * @Date 2023/6/26 11:22
 */
/**
 * Demo job: reads lines from Kafka topic "s1", word-counts them, and writes
 * "word_count" strings to topic "s2" with exactly-once delivery (two-phase
 * commit). Checkpointing is configured against RocksDB + HDFS. A second,
 * failure-injecting sink is attached to exercise checkpoint recovery.
 */
public class KafkaFlinkKafkaDemo {
    public static void main(String[] args) throws Exception {
        // Run as this user so checkpoints can be written to HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000); // local web UI port
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        // 1. State backend: RocksDB. (Pass `true` to the constructor to enable
        //    incremental checkpoints instead of full snapshots.)
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        env.enableChangelogStateBackend(true);

        // 2. Enable checkpointing, period 3s
        env.enableCheckpointing(3000);

        // 3. Checkpoint storage directory
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck");

        // 4. At most one checkpoint in flight at a time
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // 5. Minimum pause between two checkpoints, in ms. When this is set,
        //    setMaxConcurrentCheckpoints is effectively redundant.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);

        // 6. Keep externalized checkpoints when the job is cancelled
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        // 7. Checkpointing mode: exactly-once
        env.getCheckpointConfig().setCheckpointingMode(EXACTLY_ONCE);

        // 8. Unaligned checkpoints: barriers start aligned and automatically
        //    switch to unaligned once alignment exceeds the timeout below.
        //    (setForceUnalignedCheckpoints(true) would force unaligned always.)
        env.getCheckpointConfig().enableUnalignedCheckpoints();
        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ofSeconds(5));

        // 9. Fail any checkpoint that takes longer than 10s
        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);

        KafkaSource<String> source = KafkaSource.<String>builder()
            .setBootstrapServers("hadoop162:9092")
            .setTopics("s1")
            .setGroupId("atguigu")
            .setStartingOffsets(OffsetsInitializer.latest())
            .setValueOnlyDeserializer(new SimpleStringSchema())
            .build();

        // Word count: split each line on spaces, count per word, format as "word_count".
        SingleOutputStreamOperator<String> stream = env
            .fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source")
            .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
                @Override
                public void flatMap(String value,
                                    Collector<Tuple2<String, Long>> out) throws Exception {
                    for (String word : value.split(" ")) {
                        out.collect(Tuple2.of(word, 1L));
                    }
                }
            })
            .keyBy(t -> t.f0)
            .sum(1)
            .map(t -> t.f0 + "_" + t.f1);

        KafkaSink<String> sink = KafkaSink.<String>builder()
            .setBootstrapServers("hadoop162:9092")
            .setRecordSerializer(
                KafkaRecordSerializationSchema.<String>builder()
                    .setTopic("s2")
                    .setValueSerializationSchema(new SimpleStringSchema())
                    .build()
            )
            // Exactly-once delivery: enables Kafka two-phase commit; a
            // transactional-id prefix is required in this mode.
            .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
            .setTransactionalIdPrefix("atguigu2-")
            // Brokers reject transaction timeouts longer than 15 minutes by default.
            .setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "")
            .build();

        stream.sinkTo(sink);

        // Failure-injection sink: any record containing "x" crashes the job
        // after a short delay, so checkpoint recovery can be observed.
        stream.addSink(new SinkFunction<String>() {
            @Override
            public void invoke(String value, Context context) throws Exception {
                if (value.contains("x")) {
                    Thread.sleep(1000);
                    throw new RuntimeException("异常发生....");
                }
            }
        });

        // Let failures propagate so the process exits non-zero instead of
        // swallowing the exception and reporting success.
        env.execute();
    }
}
