package com.atguigu.edu.realtime.util;

import com.atguigu.edu.realtime.common.EduConfig;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Utility methods for building Flink Kafka sources and sinks against the
 * cluster configured in {@link EduConfig#bootStrapServer}.
 */
public class KafkaUtils {

    // Utility class — static methods only, no instances.
    private KafkaUtils() {
    }

    /**
     * Builds a {@link KafkaSource} that reads UTF-8 String records from the given topic.
     *
     * <p>Starts from the latest offsets and reads with {@code isolation.level=read_committed},
     * so records from uncommitted producer transactions are never consumed.
     *
     * @param topic   Kafka topic to subscribe to
     * @param groupId Kafka consumer group id
     * @return a configured String-valued KafkaSource
     */
    public static KafkaSource<String> getKafkaSource(String topic, String groupId) {
        return KafkaSource.<String>builder()
                .setTopics(topic)
                .setGroupId(groupId)
                .setBootstrapServers(EduConfig.bootStrapServer)
                .setStartingOffsets(OffsetsInitializer.latest())
                // Skip records of uncommitted transactions (pairs with an exactly-once producer).
                .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
                .setValueOnlyDeserializer(new DeserializationSchema<String>() {
                    @Override
                    public String deserialize(byte[] bytes) throws IOException {
                        // Kafka tombstones / null payloads arrive as null; pass them through.
                        if (bytes != null) {
                            // Explicit UTF-8: the bare new String(bytes) used previously decodes
                            // with the platform default charset and can corrupt non-ASCII data.
                            return new String(bytes, StandardCharsets.UTF_8);
                        }
                        return null;
                    }

                    @Override
                    public boolean isEndOfStream(String s) {
                        // Unbounded stream — never signal end-of-stream.
                        return false;
                    }

                    @Override
                    public TypeInformation<String> getProducedType() {
                        return TypeInformation.of(String.class);
                    }
                })
                .build();
    }

    /**
     * Builds a {@link KafkaSink} using the supplied record serialization schema.
     *
     * @param krs serializer that maps elements of type {@code T} to Kafka producer records
     * @param <T> element type written by the sink
     * @return a configured KafkaSink (at-least-once by default)
     */
    public static <T> KafkaSink<T> getKafkaSinkBySchema(KafkaRecordSerializationSchema<T> krs) {
        return KafkaSink.<T>builder()
                .setBootstrapServers(EduConfig.bootStrapServer)
                .setRecordSerializer(krs)
                // To guarantee exactly-once writes in production, the following are required:
                // 1. set DeliveryGuarantee.EXACTLY_ONCE
                // 2. setTransactionalIdPrefix("...")
                // 3. enable checkpointing
                // 4. transaction timeout greater than the checkpoint timeout,
                //    but less than the broker's max transaction timeout (default 15 min)
                // 5. consumers must not read uncommitted messages:
                // .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
                // .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 15 * 60 * 1000 + "")
                .build();
    }
}
