package com.atguigu.realtime.utils;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Utility class for building Flink Kafka sources/sinks and the Flink SQL
 * Kafka-connector DDL snippets shared by the real-time jobs.
 *
 * @author 洛尘
 * @since 2023-09-26 00:18
 **/
public class MyKafkaUtil {
    private static final String KAFKA_SERVER = "hadoop105:9092,hadoop106:9092,hadoop107:9092";

    /** Static utility class; not meant to be instantiated. */
    private MyKafkaUtil() {
    }

    /**
     * Builds a {@link KafkaSource} that reads String records from the given topic.
     * <p>
     * A custom deserializer is used instead of {@link SimpleStringSchema} because
     * SimpleStringSchema cannot handle null-value (tombstone) records; the schema
     * below maps a null payload to a null String instead of failing.
     *
     * @param topic   Kafka topic to consume
     * @param groupId Kafka consumer group id
     * @return a KafkaSource starting from the latest offsets
     */
    public static KafkaSource<String> getKafkaSource(String topic, String groupId) {
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers(KAFKA_SERVER)
                .setGroupId(groupId)
                .setTopics(topic)
                // .setValueOnlyDeserializer(new SimpleStringSchema()) would be
                // enough if null (tombstone) messages never occurred; the schema
                // below tolerates them.
                .setValueOnlyDeserializer(
                        new DeserializationSchema<String>() {
                            // Decode the raw Kafka payload into a Java String;
                            // a null payload (tombstone) is passed through as null.
                            @Override
                            public String deserialize(byte[] message) throws IOException {
                                if (message != null) {
                                    // Explicit charset: new String(byte[]) would
                                    // depend on the platform-default encoding.
                                    return new String(message, StandardCharsets.UTF_8);
                                }
                                return null;
                            }

                            @Override
                            public boolean isEndOfStream(String s) {
                                return false; // unbounded stream
                            }

                            @Override
                            public TypeInformation<String> getProducedType() {
                                return TypeInformation.of(String.class);
                            }
                        }
                )
                // For learning purposes, read directly from the latest offsets.
                .setStartingOffsets(OffsetsInitializer.latest())
                // Production settings (kept for reference):
                // To preserve consistency, pre-committed (uncommitted) records
                // must not be consumed:
                // .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
                // Resume from Flink's committed offsets, falling back to the
                // latest offset when none are found:
                // .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
                .build();

        return kafkaSource;
    }

    /**
     * Builds a {@link KafkaSink} that writes String records to the given topic
     * with the default (at-least-once) delivery guarantee.
     *
     * @param topic Kafka topic to write to
     * @return a KafkaSink for String values
     */
    public static KafkaSink<String> getKafkaSink(String topic) {
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                .setBootstrapServers(KAFKA_SERVER)
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic(topic)
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build()
                )
                // The delivery guarantee (at-least-once / exactly-once) controls
                // sink-side consistency. Exactly-once requires (kept for reference):
                // .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // 1. set a transactional-id prefix:
                // .setTransactionalIdPrefix("xxxxxxxx")
                // 2. checkpointing must be enabled;
                // 3. downstream consumers must use isolation level read_committed;
                // 4. the transaction timeout must satisfy:
                //    checkpoint timeout <= transaction timeout <= broker max (15 min)
                // .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "60000")
                .build();
        return kafkaSink;
    }

    /**
     * Returns the DDL statement that creates the dynamic table reading from the
     * {@code topic_db} Kafka topic.
     *
     * @param groupId Kafka consumer group id used by the table's source
     * @return a complete CREATE TABLE statement
     */
    public static String getTopicDbDDL(String groupId) {
        return "CREATE TABLE topic_db (\n" +
                "    `database` string,\n" +
                "    `table` string,\n" +
                "    `type` string,\n" +
                "    `ts` string,\n" +
                "    `data` map<string,string>,\n" +
                "    `old` map<string,string>,\n" +
                "    proc_time as PROCTIME()\n" +
                ") " + getKafkaDDL("topic_db", groupId);
    }

    /**
     * Returns the WITH-clause for a Flink SQL {@code kafka} connector bound to
     * the given topic and consumer group, reading JSON from the latest offset.
     *
     * @param topic   Kafka topic to consume
     * @param groupId Kafka consumer group id
     * @return the connector WITH-clause
     */
    public static String getKafkaDDL(String topic, String groupId) {
        return "WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = '" + topic + "',\n" +
                "  'properties.bootstrap.servers' = '" + KAFKA_SERVER + "',\n" +
                "  'properties.group.id' = '" + groupId + "',\n" +
                // Here we start from the latest offset for learning; in
                // production the recommended settings are (kept for reference):
                // " 'scan.startup.mode' = 'group-offsets',\n" +
                // " 'properties.auto.offset.reset' = 'latest'" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")";
    }

    /**
     * Returns the WITH-clause for a Flink SQL {@code upsert-kafka} connector
     * bound to the given topic, with JSON key and value formats.
     *
     * @param topic Kafka topic to write to
     * @return the connector WITH-clause
     */
    public static String getUpsertKafkaDDL(String topic) {
        return " WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = '" + topic + "',\n" +
                "  'properties.bootstrap.servers' = '" + KAFKA_SERVER + "',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")";
    }

    /**
     * Builds a {@link KafkaSink} for an arbitrary record type using the caller's
     * serialization schema (which also decides the target topic).
     *
     * @param krs record serialization schema supplied by the caller
     * @param <T> type of records written to Kafka
     * @return a KafkaSink for {@code T}
     */
    public static <T> KafkaSink<T> getKafkaSinkBySchema(KafkaRecordSerializationSchema<T> krs) {
        KafkaSink<T> kafkaSink = KafkaSink.<T>builder()
                .setBootstrapServers(KAFKA_SERVER)
                .setRecordSerializer(krs)
                // For exactly-once writes to Kafka in production (kept for reference):
                // .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // .setTransactionalIdPrefix("xxxx")
                // Checkpointing must be enabled; consumers must read with
                // isolation level read_committed; and the timeouts must satisfy:
                // checkpoint timeout <= transaction timeout <= broker max timeout
                // .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 15 * 60 * 1000 + "")
                .build();
        return kafkaSink;
    }
}