package com.atguigu.flink.state;

import com.alibaba.fastjson.JSON;
import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Created by Smexy on 2023/2/3
 *
 *
 *      离线:   ods层  ---->读  hql -----> dwd|dim层
 *          使用hdfs存储数据
 *
 *
 *      实时:   (ods层  ---->读  flink -----> dwd|dim层)  ----->  ( 读dwd ----->flink ------> dws    )
 *          使用kafka存储数据
 *
 *    ---------------------
 *      端到端精确一次:
 *              source： 能重置offset。 KafkaSource能从指定的offsets去读。
 *                          offset默认保存在KafkaSource的 状态中。
 *                          是否提交到kafka，无所谓！
 *
 *              flink的程序中: 开启ck。 设置语义 精确一次
 *
 *              sink:  FlinkKafkaProducer 使用 2pc提交工作。
 *                          保证精确一次。
 *
 *    -----------------
 *    Caused by: org.apache.kafka.common.KafkaException:
 *        Unexpected error in InitProducerIdResponse;   sink有关
 *        The transaction timeout is larger than the maximum value allowed by the broker
 *        (as configured by transaction.max.timeout.ms).
 *
 *        broker端有 transaction.max.timeout.ms 默认 为 15min，
 *        producer端的超时时间不能超过这个值。FlinkKafkaProducer 默认为1h，因此需要调小transaction.timeout.ms。
 *
 */
/**
 * End-to-end exactly-once pipeline: Kafka "topicD" -> Flink map -> Kafka "topicC".
 *
 * Exactly-once requires all three pieces visible below:
 *   1. a replayable source — KafkaSource keeps consumed offsets in Flink state;
 *   2. checkpointing enabled in EXACTLY_ONCE mode;
 *   3. a transactional (two-phase-commit) sink — FlinkKafkaProducer with
 *      Semantic.EXACTLY_ONCE.
 */
public class Demo9_EOS
{
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setParallelism(1);

        // Enable checkpointing every 2s; EXACTLY_ONCE mode is mandatory for the 2PC sink.
        env.enableCheckpointing(2000);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:9820/ck");
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Source. Offsets are stored in the source operator's state, so committing
        // them back to Kafka is not required for exactly-once (it only aids monitoring).
        KafkaSource<String> source = KafkaSource.<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setTopics("topicD")
            .setGroupId("test")
            // Resume from committed group offsets; fall back to LATEST when none exist.
            .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
            .setValueOnlyDeserializer(new SimpleStringSchema())
            // Extra consumer properties.
            // NOTE(review): KafkaSource commits offsets on checkpoint completion, so
            // enabling client auto-commit here looks redundant — confirm intent.
            .setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
            .setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000")
            // Read only records from committed upstream Kafka transactions.
            .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
            .build();

        Properties properties = new Properties();
        // NOTE(review): sink targets hadoop103 while the source uses hadoop102 —
        // verify this is the intended broker for the producer.
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop103:9092");
        // Must not exceed the broker's transaction.max.timeout.ms (default 15 min).
        // FlinkKafkaProducer's default of 1h would be rejected with
        // "Unexpected error in InitProducerIdResponse", so lower it to 10 min.
        properties.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 10 * 60 * 1000 + "");

        // Sink: transactional (2PC) producer. The default topic ("无") is never used
        // because the serialization schema below always targets "topicC" explicitly.
        FlinkKafkaProducer<WaterSensor> kafkaProducer = new FlinkKafkaProducer<>(
            "无",
            new KafkaSerializationSchema<WaterSensor>(){

                /*
                 * Wrap a WaterSensor into a key-value ProducerRecord:
                 *   key   = sensor id, so all records of one sensor hash to the
                 *           same partition;
                 *   value = JSON bytes (object -> JSON string -> byte[]) for easy
                 *           inspection with console consumers.
                 */
                @Override
                public ProducerRecord<byte[], byte[]> serialize(WaterSensor element, @Nullable Long timestamp) {
                    byte[] key = element.getId().getBytes(StandardCharsets.UTF_8);
                    byte[] value = JSON.toJSONString(element).getBytes(StandardCharsets.UTF_8);
                    return new ProducerRecord<>("topicC", key, value);
                }
            },
            properties,
            FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );

        SingleOutputStreamOperator<WaterSensor> ds =
            env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka")
               .map(new WaterSensorMapFunction());

        ds.addSink(kafkaProducer);

        // Debug sink: deliberately fails on sensor "s5" to demonstrate that the
        // open Kafka transaction is rolled back when the job restarts.
        ds.addSink(new SinkFunction<WaterSensor>()
        {
            @Override
            public void invoke(WaterSensor value, Context context) throws Exception {
                if ("s5".equals(value.getId())) {
                    throw new RuntimeException("出异常了!");
                }
                System.out.println(value);
            }
        });

        // Propagate failures instead of swallowing them: the previous
        // catch + printStackTrace hid job failures and exited with status 0.
        env.execute();
    }
}
