package com.atguigu.gmallrealtime.util;

import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmallrealtime.common.Constant;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.connector.sink.Sink;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchemaBuilder;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * @author yhm
 * @create 2023-09-22 10:54
 */
/**
 * Factory helpers for Flink's Kafka connector: DataStream API sources/sinks and
 * Flink SQL DDL fragments (both the regular {@code kafka} and the
 * {@code upsert-kafka} connectors). All broker addresses come from
 * {@link Constant#KAFKA_SERVERS}.
 *
 * @author yhm
 * @create 2023-09-22 10:54
 */
public class MyKafkaUtil {

    /** Utility class with only static members — prevent instantiation. */
    private MyKafkaUtil() {
    }

    /**
     * Builds a {@link KafkaSource} of raw String records for the given topic and
     * consumer group, starting from the earliest offset.
     *
     * <p>The consumer is set to {@code read_committed} isolation so that records
     * written by upstream exactly-once (transactional) producers are only seen
     * after their transaction commits.
     *
     * <p>A hand-written value deserializer is used instead of
     * {@link SimpleStringSchema} because downstream Flink jobs write records with
     * null/empty values (tombstones) to Kafka; the default schema would throw on a
     * null payload, while this one maps it to a {@code null} element.
     *
     * @param topicName topic to subscribe to
     * @param groupId   Kafka consumer group id
     * @return a configured source emitting UTF-8 strings ({@code null} for empty payloads)
     */
    public static KafkaSource<String> getKafkaSource(String topicName, String groupId) {
        return KafkaSource.<String>builder()
                .setBootstrapServers(Constant.KAFKA_SERVERS)
                .setTopics(topicName)
                .setGroupId(groupId)
                // Only read records whose producing transaction has committed.
                .setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed")
                .setStartingOffsets(OffsetsInitializer.earliest())
                // Downstream Flink jobs may emit null-valued records; tolerate them
                // here instead of failing like SimpleStringSchema would.
                .setValueOnlyDeserializer(new DeserializationSchema<String>() {
                    @Override
                    public String deserialize(byte[] message) throws IOException {
                        if (message != null && message.length != 0) {
                            return new String(message, StandardCharsets.UTF_8);
                        }
                        // Null/empty payload (e.g. a tombstone) becomes a null element.
                        return null;
                    }

                    @Override
                    public boolean isEndOfStream(String nextElement) {
                        // Kafka topics are unbounded; never signal end-of-stream.
                        return false;
                    }

                    @Override
                    public TypeInformation<String> getProducedType() {
                        return BasicTypeInfo.STRING_TYPE_INFO;
                    }
                })
                .build();
    }

    /**
     * Builds a {@link KafkaSink} that writes String records to a single fixed topic.
     *
     * <p>NOTE(review): no {@code DeliveryGuarantee} is configured, so the sink runs
     * with the connector's default guarantee — confirm whether at-least-once or
     * exactly-once is required for this pipeline.
     *
     * @param topicName target topic for every record
     * @return a configured String sink
     */
    public static KafkaSink<String> getKafkaSink(String topicName) {
        return KafkaSink.<String>builder()
                .setBootstrapServers(Constant.KAFKA_SERVERS)
                .setRecordSerializer(new KafkaRecordSerializationSchemaBuilder<String>()
                        .setTopic(topicName)
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build())
                .build();
    }

    /**
     * Returns the Flink SQL DDL for a {@code topic_db} table over the ODS CDC
     * topic ({@link Constant#TOPIC_ODS_DB}). Columns mirror the Maxwell/CDC JSON
     * envelope ({@code database}, {@code table}, {@code type}, {@code ts},
     * {@code data}, {@code old}) plus a {@code proc_time} processing-time attribute
     * for temporal joins.
     *
     * @param groupId Kafka consumer group id used by the table's scan source
     * @return a complete {@code CREATE TABLE} statement
     */
    public static String getTopicDbDDL(String groupId) {
        return "CREATE TABLE topic_db (\n" +
                "  `database` STRING,\n" +
                "  `table` STRING,\n" +
                "  `type` STRING,\n" +
                "  `ts`    BIGINT,\n" +
                "  `data` Map<STRING,STRING>,\n" +
                "  `old`  Map<STRING,STRING>,\n" +
                "   proc_time AS proctime()\n" +
                ") "
                + MyKafkaUtil.getKafkaDDL(Constant.TOPIC_ODS_DB, groupId);
    }

    /**
     * Returns the {@code WITH (...)} clause for a Flink SQL table backed by the
     * regular {@code kafka} connector, reading JSON.
     *
     * <p>NOTE(review): the original comment said production should read from the
     * latest offset, but {@code 'earliest-offset'} is what is actually configured —
     * confirm which startup mode is intended before changing it.
     *
     * @param topicName topic to read
     * @param groupId   Kafka consumer group id
     * @return the connector options clause, to be appended to a CREATE TABLE body
     */
    public static String getKafkaDDL(String topicName, String groupId) {
        return "WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = '" + topicName + "',\n" +
                "  'properties.bootstrap.servers' = '" + Constant.KAFKA_SERVERS + "',\n" +
                "  'properties.group.id' = '" + groupId + "',\n" +
                "  'scan.startup.mode' = 'earliest-offset',\n" +
                "  'format' = 'json'\n" +
                ")";
    }

    /**
     * Returns the {@code WITH (...)} clause for a Flink SQL table backed by the
     * {@code upsert-kafka} connector (keyed changelog output), JSON key and value.
     *
     * <p>NOTE(review): the method name misspells DDL as "DLL"; it is kept as-is
     * for backward compatibility with existing callers.
     *
     * @param sinkTopic topic to write the upsert stream to
     * @return the connector options clause, to be appended to a CREATE TABLE body
     */
    public static String getUpsertKafkaDLL(String sinkTopic) {
        return " WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = '" + sinkTopic + "',\n" +
                "  'properties.bootstrap.servers' = '" + Constant.KAFKA_SERVERS + "',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")";
    }

    /**
     * Builds a {@link KafkaSink} whose target topic is decided per record by the
     * supplied serialization schema (dynamic-topic routing), as opposed to the
     * fixed-topic {@link #getKafkaSink(String)}.
     *
     * @param krs serialization schema that produces topic, key and value per element
     * @param <T> element type of the stream being written
     * @return a configured sink for elements of type {@code T}
     */
    public static <T> KafkaSink<T> getKafkaSinkWithTopicName(KafkaRecordSerializationSchema<T> krs) {
        return KafkaSink.<T>builder()
                .setBootstrapServers(Constant.KAFKA_SERVERS)
                .setRecordSerializer(krs)
                .build();
    }
}
