package com.atguigu.flink.state;

import com.alibaba.fastjson.JSON;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Created by Smexy on 2022/11/26
 *
 *      场景：
 *            Job1:  ods(kafka ----->flink ----->kafka)   dwd(kafka----> flink ---->kafka)
 *
 *      端到端的一致性:
 *
 *              source： KafkaSource 支持重复读数据
 *              transform:  保证使用ck存储状态
 *              sink:   FlinkKafkaProducer  支持2PC提交(事务方式)
 *
 *                      kafka支不支持幂等(覆盖写)?
 *                              不能。日志系统。只能append，不能修改。
 *
 *                      kafka提供了Producer的幂等支持，通过序列号。
 *
 *   ------------------------
 *   org.apache.kafka.common.KafkaException:
 *           Unexpected error in InitProducerIdResponse;
 *               producer事务的超时时间超过了broker允许的最大值。
 *               在broker端，transaction.max.timeout.ms 默认为 15 minutes，
 *               因此 producer端的事务超时时间必须 < 15min。
 *               The transaction timeout is larger than the maximum value allowed by the broker
 *                  (as configured by transaction.max.timeout.ms).
 *
 *           第一种： 调整broker端      transaction.max.timeout.ms
 *           第二种： 调整producer端：   transaction.timeout.ms
 *
 */
public class Demo5_KafkaEOS
{
    /**
     * End-to-end exactly-once pipeline: Kafka topic {@code t4} -> Flink -> Kafka topic {@code t5}.
     *
     * <p>Exactly-once is achieved by combining: a replayable source ({@link KafkaSource}),
     * checkpointed operator state, and a two-phase-commit sink
     * ({@link FlinkKafkaProducer} with {@code Semantic.EXACTLY_ONCE}).
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Enable checkpointing: snapshot state every 500 ms.
        env.enableCheckpointing(500);

        // Where checkpoints are persisted. Without an external location the
        // snapshot lives only in the JobManager's memory.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:9820/ck");

        // Keep the last checkpoint when the job is cancelled, so it can be restored later.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Checkpoint consistency mode required for end-to-end exactly-once.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Minimum pause between two checkpoints (throttles barrier injection).
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);

        // Checkpoints that do not complete within this timeout are discarded by the JobManager.
        env.getCheckpointConfig().setCheckpointTimeout(60000);

        // Allow only one in-flight checkpoint, i.e. at most one barrier set in the stream.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // Source: replayable Kafka source (reads from the last committed offset,
        // falling back to EARLIEST when no committed offset exists).
        KafkaSource<String> source = KafkaSource.<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setTopics("t4")
            .setGroupId("sz0704c")
            .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
            .setValueOnlyDeserializer(new SimpleStringSchema())
            .setProperty("auto.commit.interval.ms", "500")
            .setProperty("enable.auto.commit", "true")
            // BUGFIX: the Kafka consumer property is "isolation.level" (dots, not dashes).
            // The previous "isolation-level" was silently ignored, so uncommitted
            // transactional records could be read, breaking exactly-once.
            .setProperty("isolation.level", "read_committed")
            .build();

        // Sink serialization: WaterSensor -> ProducerRecord<byte[], byte[]>.
        KafkaSerializationSchema<WaterSensor> kafkaSerializationSchema = new KafkaSerializationSchema<WaterSensor>()
        {
            /*
                Use the sensor id as the record key so that all records of the
                same sensor land in the same partition; the value is the POJO
                serialized as a JSON string.
             */
            @Override
            public ProducerRecord<byte[], byte[]> serialize(WaterSensor element, @Nullable Long timestamp) {

                byte[] key = element.getId().getBytes(StandardCharsets.UTF_8);

                byte[] value = JSON.toJSONString(element).getBytes(StandardCharsets.UTF_8);

                return new ProducerRecord<byte[], byte[]>("t5", key, value);
            }
        };

        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hadoop102:9092");
        // Producer transaction timeout (10 min). Must not exceed the broker's
        // transaction.max.timeout.ms (15 min by default), or InitProducerId fails.
        properties.put("transaction.timeout.ms", 60 * 10 * 1000 + "");

        // Two-phase-commit Kafka sink; transactions commit on checkpoint completion.
        FlinkKafkaProducer<WaterSensor> flinkKafkaProducer = new FlinkKafkaProducer<WaterSensor>(
            "无",
            kafkaSerializationSchema,
            properties,
            FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );

        // Parse each CSV line "id,ts,vc" into a WaterSensor.
        SingleOutputStreamOperator<WaterSensor> ds = env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
                                                             .map(new MapFunction<String, WaterSensor>()
                                                             {
                                                                 @Override
                                                                 public WaterSensor map(String value) throws Exception {
                                                                     String[] words = value.split(",");
                                                                     return new WaterSensor(words[0], Long.valueOf(words[1]), Integer.valueOf(words[2]));
                                                                 }
                                                             });
        ds
           .addSink(flinkKafkaProducer);

        // Debug sink: fails deliberately on id "s5" to demonstrate that the
        // transactional Kafka sink rolls back uncommitted output on failure.
        ds.addSink(new SinkFunction<WaterSensor>()
        {
            @Override
            public void invoke(WaterSensor value, SinkFunction.Context context) throws Exception {
                if (value.getId().equals("s5")){
                    throw new RuntimeException("出异常了...");
                }
                System.out.println(value);
            }
        });

        try {
            env.execute();
        } catch (Exception e) {
            // Propagate instead of swallowing: a failed job must not exit with status 0.
            throw new RuntimeException("Flink job execution failed", e);
        }
    }
}
