package com.atguigu.flink.state;

import com.alibaba.fastjson.JSON;
import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Created by Smexy on 2023/3/3
 *
 *      This program reads data from Kafka, processes it, and writes the result back to Kafka.
 *
 *
 *      End-to-end exactly-once consistency requires:
 *          ① The external system allows data replay, and the source can replay data
 *          ② During computation, state can be checkpointed
 *          ③ The sink achieves exactly-once via either
 *                  transactional output
 *                      WAL (write-ahead log)
 *                      2PC: KafkaSink
 *                  idempotent output
 */
public class Demo12_EOS
{
    /**
     * Wires up a Kafka -> Flink -> Kafka pipeline with end-to-end exactly-once
     * semantics: replayable source, checkpointed state, and a two-phase-commit
     * (transactional) Kafka sink.
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // ② State can be checkpointed. EXACTLY_ONCE checkpointing is mandatory
        //    for the transactional sink below to commit on checkpoint completion.
        env.enableCheckpointing(3000, CheckpointingMode.EXACTLY_ONCE);

        // ① KafkaSource can replay data (reset offsets) — the source-side guarantee.
        KafkaSource<String> kafkaSource = KafkaSource
            .<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setGroupId("atguigu")
            .setTopics("topicB")
            // Always set read_committed when using KafkaSource in an EOS pipeline;
            // otherwise records from uncommitted (possibly aborted) upstream
            // transactions leak through and exactly-once is broken.
            .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed")
            .setValueOnlyDeserializer(new SimpleStringSchema())
            .setStartingOffsets(OffsetsInitializer.latest())
            .build();

        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"hadoop102:9092");
        // The producer's default transaction timeout (1h) exceeds the broker's
        // transaction.max.timeout.ms (default 15min); without lowering it, the
        // producer fails with: "The transaction timeout is larger than the
        // maximum value allowed by the broker (as configured by
        // transaction.max.timeout.ms)". Set it to 10 minutes.
        properties.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(10 * 60 * 1000));

        // ③ Sink-side guarantee: FlinkKafkaProducer in EXACTLY_ONCE mode uses
        //    two-phase commit (2PC) — records are written inside a Kafka
        //    transaction that commits when the checkpoint completes.
        FlinkKafkaProducer<WaterSensor> flinkKafkaProducer = new FlinkKafkaProducer<>(
            // Default topic; effectively unused here because the serialization
            // schema below sets the target topic ("topicD") on every record.
            "无",
            new KafkaSerializationSchema<WaterSensor>()
            {
                /** Serializes a sensor reading: key = sensor id, value = JSON body. */
                @Override
                public ProducerRecord<byte[], byte[]> serialize(WaterSensor element, @Nullable Long timestamp) {
                    byte[] key = element.getId().getBytes(StandardCharsets.UTF_8);
                    byte[] value = JSON.toJSONString(element).getBytes(StandardCharsets.UTF_8);
                    return new ProducerRecord<>("topicD", key, value);
                }
            },
            properties,
            FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );


        SingleOutputStreamOperator<WaterSensor> ds = env
            .fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "source")
            .map(new WaterSensorMapFunction());

        ds.addSink(flinkKafkaProducer);
        // Debug sink: deliberately fails on sensor "s6" to demonstrate that
        // uncommitted transactional output is not visible to read_committed
        // consumers after a failure/restart.
        ds.addSink(new SinkFunction<WaterSensor>()
        {
            @Override
            public void invoke(WaterSensor value, Context context) throws Exception {
                if ("s6".equals(value.getId())){
                    throw new RuntimeException("出异常了...");
                }
                System.out.println(value);
            }
        });


        try {
            env.execute();
        } catch (Exception e) {
            // Fail loudly and keep the cause: silently printing the stack trace
            // would hide job-submission/execution failures from the caller.
            throw new RuntimeException("Flink job execution failed", e);
        }
    }
}
