package com.alison.datastream.exactlyonce;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.lang.UUID;
import cn.hutool.core.util.HexUtil;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichFilterFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.PipelineOptions;
import org.apache.flink.configuration.PipelineOptionsInternal;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.TwoPhaseCommitSinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaException;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaSerializationSchemaWrapper;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.flink.streaming.connectors.kafka.internals.KeyedSerializationSchemaWrapper;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import scala.util.parsing.input.StreamReader;

import javax.annotation.Nullable;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class E3_FlinkKafkaEndToEnd {
    /** Job name; also used as the checkpoint sub-directory name. */
    private static final String taskName = E3_FlinkKafkaEndToEnd.class.getSimpleName();

    /**
     * End-to-end exactly-once demo: Kafka source -> dedup filter -> map -> transactional Kafka sink.
     *
     * <p>The sink runs in {@code Semantic.EXACTLY_ONCE} (two-phase commit). Every record written
     * inside the current, not-yet-committed Kafka transaction is also appended to a local "_set"
     * file. After a failure/restart, the filter replays that file and drops records that were
     * already sent (but not committed) before the crash; on a successful commit the file is
     * cleared, since committed records can no longer be replayed as duplicates.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
//        configuration.setString(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID, "");
//        configuration.setString("execution.savepoint.path", "D:\\workspace\\lab\\learnbigdata\\learnflink\\flink-datastream\\src\\main\\resources\\checkpoint\\E3_FlinkKafkaEndToEnd\\3249001b9d13b4725768be93f4a1924f\\chk-2");
        // Id that keys the on-disk dedup file so it can be located again across restarts.
        String checkId = "123";
        configuration.setString("checkId", checkId);
        ParameterTool parameterTool = ParameterTool.fromMap(Collections.singletonMap("checkId", checkId));
        configuration.setBoolean("flink.disable-metrics", true);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.getConfig().setGlobalJobParameters(parameterTool);

        env.setParallelism(1);
        // Restart strategy: fail fast, never restart automatically.
        env.setRestartStrategy(RestartStrategies.noRestart());
        // Local checkpoint configuration: start a checkpoint every 10s.
        env.enableCheckpointing(1000 * 10L);
        CheckpointConfig checkpointConf = env.getCheckpointConfig();
        // Exactly-once mode (this is the default).
        checkpointConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Leave at least 5s between the end of one checkpoint and the start of the next.
        checkpointConf.setMinPauseBetweenCheckpoints(1000 * 5L); // 5s
        // A checkpoint must complete within one minute or it is discarded.
        checkpointConf.setCheckpointTimeout(1000 * 60L); // 60s
        // Externalized checkpoints: RETAIN_ON_CANCELLATION keeps the checkpoint data
        // even after the job is cancelled, so it can be used to restore later.
        checkpointConf.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // State backend + where checkpoint data is stored on disk.
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("file:///D:/workspace/lab/learnbigdata/learnflink/flink-datastream/src/main/resources/checkpoint/" + taskName);
//        env.setStateBackend(new FsStateBackend("file:///D:/workspace/lab/learnbigdata/learnflink/flink-datastream/src/main/resources/checkpoint/" + taskName));
        // Single dedup file shared by the sink (writer) and the filter (reader via distributed cache).
        // NOTE: this path is reused below via this variable instead of being re-built inline,
        // so sink, filter and registration can never drift apart.
        String cacheFile = "D:\\workspace\\lab\\learnbigdata\\learnflink\\flink-datastream\\src\\main\\resources\\checkpoint\\E3_FlinkKafkaEndToEnd\\" + checkId + "\\_set";
        FileUtil.touch(new File(cacheFile));
        env.registerCachedFile("file:///" + cacheFile, "duplicateSet");

        // Kafka source configuration.
        Properties sourceProperties = new Properties();
        sourceProperties.setProperty("bootstrap.servers", "192.168.56.101:9092");
        sourceProperties.setProperty("group.id", "kafkaSourceGroup");
        sourceProperties.setProperty("client.id", "flinkInputTopicClient");
        // Plain consumer; the previous anonymous subclass only contained overrides that
        // delegated straight to super, so it added nothing.
        FlinkKafkaConsumer<String> kafkaSource =
                new FlinkKafkaConsumer<>("flink_input_topic", new SimpleStringSchema(), sourceProperties);
//        kafkaSource.setStartFromEarliest(); // a new group can consume historical data; an existing group resumes from its offset
        // With checkpointing enabled, a restarted job resumes from the offsets stored in the checkpoint.
//        kafkaSource.setStartFromLatest(); // only consume new data
//        kafkaSource.setStartFromSpecificOffsets(Collections.singletonMap(new KafkaTopicPartition("flink_input_topic", 0), 5L));
        // Deprecated since Flink 1.13:
//        FlinkKafkaConsumer011<String> kafkaSource = new FlinkKafkaConsumer011<String>("flink_input_topic", new SimpleStringSchema(), sourceProperties);

        // Kafka sink configuration.
        Properties sinkProperties = new Properties();
        sinkProperties.setProperty("bootstrap.servers", "192.168.56.101:9092");
        sinkProperties.setProperty("group.id", "kafkaSinkGroup");
        sinkProperties.setProperty("client.id", "flinkOutputTopicClient");
//        sinkProperties.setProperty("isolation.level", "read_committed");
        // End-to-end consistency: transaction.timeout.ms (broker default 1h) must be set below
        // the broker's transaction.max.timeout.ms (default 15min).
        sinkProperties.setProperty("transaction.timeout.ms", 1000 * 60 * 5 + "");
        // End-to-end consistency: the producer must be created with Semantic.EXACTLY_ONCE.
        KafkaSerializationSchema<String> kafkaSerializationSchema = new KafkaSerializationSchema<String>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(String element, @Nullable Long timestamp) {
                return new ProducerRecord<>("flink_output_topic", element.getBytes(StandardCharsets.UTF_8));
            }
        };
        FlinkKafkaProducer<String> kafkaSink = new FlinkKafkaProducer<String>("flink_output_topic", kafkaSerializationSchema, sinkProperties, FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
            // Dedicated monitor guarding the dedup file. FIX: the original code synchronized on a
            // String that was rebuilt by concatenation on every call, i.e. a brand-new object each
            // time, which provided no mutual exclusion at all. A zero-length byte[] is used because
            // this sink function is Java-serialized by Flink and byte[] is Serializable (a plain
            // Object field would break job submission).
            final byte[] dedupFileLock = new byte[0];
            // Distributed-cache copy of the dedup file; fetched in open() (kept for inspection).
            File duplicateSetFile;

            @Override
            public void open(Configuration configuration) throws Exception {
                super.open(configuration);
                duplicateSetFile = getRuntimeContext().getDistributedCache().getFile("duplicateSet");
            }

            @Override
            protected KafkaTransactionState beginTransaction() throws FlinkKafkaException {
                KafkaTransactionState kafkaTransactionState = super.beginTransaction();
                System.out.println("===========beginTransaction=====================");
                System.out.println(kafkaTransactionState);
                System.out.println("===========beginTransaction=====================");
                return kafkaTransactionState;
            }

            // Pre-commit only happens during checkpoint snapshotState.
            @Override
            protected void preCommit(FlinkKafkaProducer.KafkaTransactionState transaction) throws FlinkKafkaException {
                System.out.println("-------preCommit--------");
                System.out.println(transaction);
                System.out.println("-------preCommit--------");
                super.preCommit(transaction);
            }

            // Commit the corresponding transaction id.
            @Override
            protected void commit(FlinkKafkaProducer.KafkaTransactionState transaction) {
                super.commit(transaction);
                // The transaction is now visible to read_committed consumers, so its records can
                // no longer be replayed as duplicates: clear the dedup file.
                synchronized (dedupFileLock) {
                    FileUtil.del(cacheFile);
                }
            }

            @Override
            protected void abort(FlinkKafkaProducer.KafkaTransactionState transaction) {
                super.abort(transaction);
            }

            /*
             * Records are still sent to Kafka here (inside the open transaction); they just stay
             * invisible to read_committed consumers until commit().
             */
            @Override
            public void invoke(KafkaTransactionState transaction, String next, Context context) throws FlinkKafkaException {
                super.invoke(transaction, next, context);
                // Remember the element so a restart before commit can filter it out.
                synchronized (dedupFileLock) {
                    FileUtil.appendString(next + "\n", cacheFile, StandardCharsets.UTF_8);
                }
            }

        };

        DataStream<String> kafkaSourceDataStream = env.addSource(kafkaSource).name("kafkaSource").uid("kafkaSource");
//        kafkaSourceDataStream.print();
        AtomicInteger atomicInteger = new AtomicInteger(1);
        kafkaSourceDataStream.filter(new RichFilterFunction<String>() {
            // Records written by an uncommitted transaction before the last failure.
            Set<String> duplicateSet = new HashSet<>();

            @Override
            public void open(Configuration parameters) throws Exception {
                File duplicateSetFile = getRuntimeContext().getDistributedCache().getFile("duplicateSet");
                if (FileUtil.exist(duplicateSetFile) && duplicateSetFile.length() > 0) {
                    FileUtil.readUtf8Lines(duplicateSetFile, duplicateSet);
                }
            }

            @Override
            public boolean filter(String value) throws Exception {
                // Drop anything that was already sent (but not committed) before the restart.
                return !duplicateSet.contains(value);
            }
        }).map((MapFunction<String, String>) item -> {
            // Dedup via set: data is persisted to disk; on replay it is matched by jobid+record.
            System.out.println("----" + item);
            // Counter kept so a failure can be injected for testing (see commented-out code).
            atomicInteger.incrementAndGet();
//            if (atomicInteger.get() == 10) {
//                throw new IllegalArgumentException("illegalArgument");
//            }
            return item;
        }).addSink(kafkaSink).name("kafkaSink").uid("kafkaSink");
        JobExecutionResult execute = env.execute(taskName);
        JobID jobID = execute.getJobID();
        System.out.println(jobID);
    }
}