package com.atguigu.flink.chapter07_state;

import com.alibaba.fastjson.JSON;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Created by Smexy on 2022/10/28
 *      topicA                  topicC
 *      kafka ----> Flink ----->Kafka
 *
 *     端到端的精确一次:
 *      source：  精确一次，可以从上次消费的位置重放偏移量。
 *                      开启了ck，自动把offset维护在状态中，和是否将offset提交到kafka无关！
 *      计算:    依赖ck。 设置语义。
 *      sink:    精确一次，依赖kafka的 2阶段提交
 *
 *   -----------------
 *   Unexpected error in InitProducerIdResponse;
 *   The transaction timeout is larger than the maximum value allowed by the broker
 *          客户端事务的超时时间 超过了 Broker端允许的最大值
 *      (as configured by transaction.max.timeout.ms).
 *
 *      默认broker端：  transaction.max.timeout.ms = 15min
 *      客户端的超时时间:  不允许超过这个值
 *                          默认 transaction.timeout.ms  = 1h
 */
public class Demo3_EOSKafka
{
    /**
     * Wires up a Kafka -> Flink -> Kafka pipeline demonstrating end-to-end
     * exactly-once: the source replays offsets from checkpointed state, the
     * runtime uses EXACTLY_ONCE checkpoints, and the sink relies on Kafka's
     * two-phase-commit transactions.
     */
    public static void main(String[] args) {

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 3333);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        // Enable checkpointing: required both for offset replay on the source
        // and for committing the sink's Kafka transactions.
        env.enableCheckpointing(2000);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:9820/ck");
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        KafkaSource<String> source = KafkaSource.<String>builder()
            .setBootstrapServers("hadoop102:9092,hadoop103:9092")  // cluster address
            .setTopics("topicA")   // topic to consume
            .setGroupId("g1")      // consumer group id
            // Start from committed offsets; fall back to EARLIEST if none exist.
            .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
            // Value deserializer (a consumer must have key/value deserializers).
            .setValueOnlyDeserializer(new SimpleStringSchema())
            // Extra consumer properties:
            // Disable auto-commit — with checkpointing enabled, Flink keeps the
            // offsets in checkpointed state; committing to Kafka is irrelevant
            // for exactly-once.
            .setProperty("enable.auto.commit", "false")
            // NOTE: this interval only matters when enable.auto.commit=true;
            // kept for reference, it has no effect here.
            .setProperty("auto.commit.interval.ms", "1000")
            // FIX: the correct Kafka config key is "isolation.level" (dots, not
            // dashes). With the misspelled key the setting was silently ignored
            // and the consumer could read uncommitted (aborted) transactional
            // records.
            .setProperty("isolation.level", "read_committed")
            .build();

        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hadoop103:9092");
        // Client-side transaction timeout must not exceed the broker's
        // transaction.max.timeout.ms (15 min by default); the Flink producer's
        // own default (1 h) would be rejected by the broker.
        properties.put("transaction.timeout.ms", "" + 60 * 15 * 1000);

        FlinkKafkaProducer<WaterSensor> flinkKafkaProducer = new FlinkKafkaProducer<WaterSensor>(
            // Default topic — unused here because serialize() names the topic
            // explicitly on every record.
            "没用",
            new KafkaSerializationSchema<WaterSensor>()
            {
                /**
                 * Converts a WaterSensor into a ProducerRecord of byte arrays.
                 * The value is the JSON form of the record; the key is the
                 * sensor id. Target topic is hard-coded to "t1".
                 */
                @Override
                public ProducerRecord<byte[], byte[]> serialize(WaterSensor element, @Nullable Long timestamp) {
                    // Serialize via JSON so the payload is human-readable in
                    // Kafka tooling (raw object bytes would not be).
                    byte[] value = JSON.toJSONString(element).getBytes(StandardCharsets.UTF_8);

                    // Use the sensor id as the record key.
                    byte[] key = element.getId().getBytes(StandardCharsets.UTF_8);

                    return new ProducerRecord<>("t1", key, value);
                }
            },
            properties,
            // Exactly-once sink semantics via Kafka transactions (2PC).
            FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );

        SingleOutputStreamOperator<WaterSensor> ds = env
            .fromSource(source, WatermarkStrategy.noWatermarks(), "hahaha")
            .map(new MapFunction<String, WaterSensor>()
            {
                // Parse a CSV line "id,ts,vc" into a WaterSensor.
                @Override
                public WaterSensor map(String value) throws Exception {
                    String[] data = value.split(",");
                    return new WaterSensor(
                        data[0],
                        Long.valueOf(data[1]),
                        Integer.valueOf(data[2])
                    );
                }
            });

        ds.addSink(flinkKafkaProducer);
        // Debug sink: prints each record and deliberately fails on id "s5" to
        // demonstrate recovery/replay under exactly-once semantics.
        ds.addSink(new SinkFunction<WaterSensor>()
        {
            @Override
            public void invoke(WaterSensor value, Context context) throws Exception {
                if ("s5".equals(value.getId())) {
                    throw new RuntimeException("抛异常了!");
                }
                System.out.println(value);
            }
        });

        try {
            env.execute();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
