package flink.utils;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchemaBuilder;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.KafkaSourceBuilder;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import com.alibaba.fastjson.JSONObject;
import flink.deserialize.JSONObjectDeserialization;
import flink.partitioner.KafkaPartitioner;
import flink.serialize.KafkaKeySerializeSchema;
import flink.serialize.KafkaTopicSelector;
import flink.serialize.KafkaValueSerializeSchema;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;

import java.util.*;

import static flink.utils.AppConfig.*;

/**
 * Common Flink utility helpers: building Kafka sources and sinks, splitting a stream by topic
 * name, and converting Table query results into DataStreams.
 *
 * @author Jason
 * @version 1.0
 * @since Created in 2023/5/4 2:41 PM, modified by Jason
 */
public class FlinkUtils {

    // Kept public for backward compatibility with any external callers; now final so the
    // logger reference cannot be reassigned.
    public static final Logger LOG = LoggerFactory.getLogger(FlinkUtils.class);
    // Delimiter for comma-separated topic lists coming from configuration.
    private static final String SPLIT = ",";

    /**
     * Builds a Kafka-backed {@code DataStream<JSONObject>} using broker/topic/group/offset
     * settings read from the application configuration ({@code AppConfig}).
     *
     * @param env Flink stream execution environment
     * @param properties extra Kafka consumer properties, may be {@code null}
     * @return a {@code DataStream<JSONObject>} of records read from the configured topics
     */
    public static DataStream<JSONObject> addKafkaSource(
            StreamExecutionEnvironment env, @Nullable Properties properties) {
        final String sourceKafkaBrokerList = getSourceKafkaBrokerList();
        final String sourceKafkaTopic = getSourceKafkaTopic();
        final String sourceKafkaGroupId = getSourceKafkaGroupId();
        final int sourceKafkaPartition = Integer.parseInt(getSourceKafkaPartition());
        final String sourceKafkaOffset = getSourceKafkaOffset();
        return getKafkaDataStream(
                env,
                properties,
                sourceKafkaBrokerList,
                sourceKafkaGroupId,
                sourceKafkaOffset,
                sourceKafkaTopic,
                sourceKafkaPartition);
    }

    /**
     * Builds a {@code KafkaSource}-backed {@code DataStream<JSONObject>} from explicit
     * connection parameters (does not read the configuration file).
     *
     * @param env Flink stream execution environment
     * @param properties extra Kafka consumer properties, may be {@code null}
     * @param kafkaBrokerList Kafka bootstrap servers
     * @param groupId Kafka consumer group id
     * @param kafkaOffset starting offset keyword ("earliest" or "latest")
     * @param topics comma-separated topic names
     * @param sourceKafkaPartition parallelism for the source operator; ignored when {@code <= 0}
     * @return a {@code DataStream<JSONObject>} reading from the given topics
     */
    public static DataStream<JSONObject> getKafkaDataStream(
            StreamExecutionEnvironment env,
            @Nullable Properties properties,
            String kafkaBrokerList,
            String groupId,
            String kafkaOffset,
            String topics,
            int sourceKafkaPartition) {
        final List<String> topicList = Arrays.asList(topics.split(SPLIT));
        final KafkaSourceBuilder<JSONObject> jsonObjectKafkaSourceBuilder =
                KafkaSource.<JSONObject>builder()
                        .setBootstrapServers(kafkaBrokerList)
                        .setTopics(topicList)
                        .setGroupId(groupId)
                        .setStartingOffsets(getStartingOffsets(kafkaOffset))
                        .setDeserializer(
                                KafkaRecordDeserializationSchema.of(
                                        new JSONObjectDeserialization(true)));

        if (properties != null) {
            jsonObjectKafkaSourceBuilder.setProperties(properties);
        }
        KafkaSource<JSONObject> kfkSource = jsonObjectKafkaSourceBuilder.build();

        // NOTE(review): the uid is a fixed literal, so calling this twice in one job would
        // create duplicate operator uids — confirm each job only builds one source this way.
        final SingleOutputStreamOperator<JSONObject> dataStream =
                env.fromSource(kfkSource, WatermarkStrategy.noWatermarks(), "Kafka Source")
                        .uid("Kafka Source");

        // Apply the requested source parallelism only when explicitly positive.
        if (sourceKafkaPartition > 0) {
            dataStream.setParallelism(sourceKafkaPartition);
        }
        return dataStream;
    }

    /**
     * Builds a Kafka-backed {@code DataStream<JSONObject>} from explicit parameters. Thin
     * alias of {@link #getKafkaDataStream}; none of the configuration file is consulted.
     *
     * @param env Flink stream execution environment
     * @param properties extra Kafka consumer properties, may be {@code null}
     * @param kafkaBrokerList Kafka bootstrap servers
     * @param groupId Kafka consumer group id
     * @param kafkaOffset starting offset keyword ("earliest" or "latest")
     * @param topics comma-separated topic names
     * @param sourceKafkaPartition parallelism for the source operator; ignored when {@code <= 0}
     * @return a {@code DataStream<JSONObject>}
     */
    public static DataStream<JSONObject> addKafkaSource(
            StreamExecutionEnvironment env,
            @Nullable Properties properties,
            String kafkaBrokerList,
            String groupId,
            String kafkaOffset,
            String topics,
            int sourceKafkaPartition) {
        return getKafkaDataStream(
                env,
                properties,
                kafkaBrokerList,
                groupId,
                kafkaOffset,
                topics,
                sourceKafkaPartition);
    }

    /**
     * Splits one {@code DataStream<JSONObject>} into per-topic streams, keyed by the value of
     * each record's {@code "topic"} field, using Flink side outputs.
     *
     * @param dataStream the input stream; each element is expected to carry a {@code "topic"}
     *     string field (populated by the deserializer)
     * @param topics comma-separated topic names; when {@code null}, the configured source
     *     topics are used
     * @return a map from topic name to the side-output {@code DataStream<JSONObject>} for
     *     that topic; records whose topic matches none of the tags are dropped
     */
    public static Map<String, DataStream<JSONObject>> splitStreamByTopicName(
            DataStream<JSONObject> dataStream, @Nullable String topics) {
        final List<String> topicList =
                topics == null
                        ? Arrays.asList(getSourceKafkaTopic().split(SPLIT))
                        : Arrays.asList(topics.split(SPLIT));
        // One OutputTag per topic, created dynamically.
        Map<String, OutputTag<JSONObject>> outputTagMap = new HashMap<>(topicList.size());
        for (String topicName : topicList) {
            outputTagMap.put(
                    topicName, new OutputTag<>(topicName, TypeInformation.of(JSONObject.class)));
        }

        Map<String, DataStream<JSONObject>> dataStreamMap = new HashMap<>(topicList.size());
        // Route each record to its topic's side output. Using an O(1) map lookup per record
        // instead of scanning every tag (the previous forEach was O(#topics) per element).
        final SingleOutputStreamOperator<JSONObject> topicProcess =
                dataStream.process(
                        new ProcessFunction<JSONObject, JSONObject>() {
                            @Override
                            public void processElement(
                                    JSONObject value,
                                    ProcessFunction<JSONObject, JSONObject>.Context ctx,
                                    Collector<JSONObject> out) {
                                final String topic = value.getString("topic");
                                final OutputTag<JSONObject> tag = outputTagMap.get(topic);
                                if (tag != null) {
                                    ctx.output(tag, value);
                                }
                            }
                        });
        for (String topic : topicList) {
            dataStreamMap.put(topic, topicProcess.getSideOutput(outputTagMap.get(topic)));
        }
        return dataStreamMap;
    }

    /**
     * Builds a {@code KafkaSink<JSONObject>} from explicit parameters (no configuration file
     * access). Thin alias of {@link #buildKafkaSink}.
     *
     * @param kafkaBrokerList Kafka bootstrap servers
     * @param sinkKafkaTopicExtractFiled record field used to extract the target topic when
     *     {@code topic} is empty
     * @param topic fixed target topic; when non-empty, all records go to this topic
     * @param topicPrefix optional topic prefix (mainly for testing environments)
     * @param keySerialize field used to build the record key serializer
     * @param removeKey field to strip from each record before writing
     * @return a configured {@code KafkaSink<JSONObject>}
     */
    public static KafkaSink<JSONObject> addKafkaSink(
            String kafkaBrokerList,
            String sinkKafkaTopicExtractFiled,
            String topic,
            String topicPrefix,
            String keySerialize,
            String removeKey) {
        return buildKafkaSink(
                kafkaBrokerList,
                sinkKafkaTopicExtractFiled,
                topic,
                topicPrefix,
                keySerialize,
                removeKey);
    }

    /**
     * Builds a {@code KafkaSink<JSONObject>} with every parameter read from the application
     * configuration ({@code AppConfig}).
     *
     * @return a configured {@code KafkaSink<JSONObject>}
     */
    public static KafkaSink<JSONObject> addKafkaSink() {
        return buildKafkaSink(
                getSinkKafkaBrokerList(),
                getSinkKafkaTopicExtractFiled(),
                getSinkkafkaTopic(),
                getSinkKafkaTopicPrefix(),
                getSinkKafkaKeySerialize(),
                getSinkKafkaTopicRemoveKey());
    }

    /**
     * Core builder for a {@code KafkaSink<JSONObject>}. When {@code topic} is non-empty all
     * records are written to that topic (optionally prefixed); otherwise the topic is extracted
     * per record via {@code KafkaTopicSelector} using {@code sinkKafkaTopicExtractFiled}.
     *
     * @param kafkaBrokerList Kafka bootstrap servers
     * @param sinkKafkaTopicExtractFiled record field used to extract the target topic when no
     *     fixed topic is given
     * @param topic fixed target topic, may be empty
     * @param topicPrefix optional topic prefix (mainly for testing environments)
     * @param keySerialize field used to build the record key serializer
     * @param removeKey field to strip from each record before writing
     * @return a configured {@code KafkaSink<JSONObject>}
     */
    public static KafkaSink<JSONObject> buildKafkaSink(
            String kafkaBrokerList,
            String sinkKafkaTopicExtractFiled,
            String topic,
            String topicPrefix,
            String keySerialize,
            String removeKey) {
        LOG.info(
                "kafkaBrokerList : {}, sinkKafkaTopicExtractFiled : {}, topic : {}, topicPrefix : {}, keySerialize: {}, removeKey : {}",
                kafkaBrokerList,
                sinkKafkaTopicExtractFiled,
                topic,
                topicPrefix,
                keySerialize,
                removeKey);
        final KafkaRecordSerializationSchemaBuilder<JSONObject>
                jsonObjectKafkaRecordSerializationSchemaBuilder =
                        KafkaRecordSerializationSchema.builder()
                                .setKeySerializationSchema(
                                        new KafkaKeySerializeSchema(keySerialize))
                                .setValueSerializationSchema(
                                        new KafkaValueSerializeSchema(removeKey))
                                .setPartitioner(new KafkaPartitioner());
        // Fixed topic wins; otherwise resolve the topic dynamically from each record.
        if (StringUtils.isNotEmpty(topic)) {
            if (StringUtils.isNotEmpty(topicPrefix)) {
                topic = String.format("%s_%s", topicPrefix, topic);
            }
            jsonObjectKafkaRecordSerializationSchemaBuilder.setTopic(topic);
        } else {
            jsonObjectKafkaRecordSerializationSchemaBuilder.setTopicSelector(
                    new KafkaTopicSelector(sinkKafkaTopicExtractFiled, topicPrefix));
        }
        return KafkaSink.<JSONObject>builder()
                .setBootstrapServers(kafkaBrokerList)
                .setRecordSerializer(jsonObjectKafkaRecordSerializationSchemaBuilder.build())
                .build();
    }

    /**
     * Runs a SQL query whose result is a retract stream and returns only the insert (+I) side:
     * retraction (-D) rows — where {@code f0} is {@code false} — are filtered out.
     *
     * @param tEnv stream table environment
     * @param query SQL query to execute
     * @param targetClass element type of the resulting stream
     * @param <T> result element type
     * @return a {@code DataStream<T>} containing only accumulate rows
     */
    public static <T> DataStream<T> getRetractStream(
            StreamTableEnvironment tEnv, String query, Class<T> targetClass) {
        Table table = tEnv.sqlQuery(query);
        return tEnv.toRetractStream(table, targetClass)
                .filter(data -> data.f0)
                .map((MapFunction<Tuple2<Boolean, T>, T>) value -> value.f1);
    }

    /**
     * Runs a SQL query producing an insert-only (non-retract) stream.
     *
     * @param tEnv stream table environment
     * @param query SQL query to execute
     * @param targetClass element type of the resulting stream
     * @param <T> result element type
     * @return the query result as a {@code DataStream<T>}
     */
    public static <T> DataStream<T> getDataStream(
            StreamTableEnvironment tEnv, String query, Class<T> targetClass) {
        Table table = tEnv.sqlQuery(query);
        return tEnv.toDataStream(table, targetClass);
    }

    /**
     * Maps an offset keyword to a Kafka {@link OffsetsInitializer}. Null-safe and
     * case-insensitive: {@code "earliest"} (any casing, surrounding whitespace ignored) yields
     * the earliest initializer; anything else — including {@code null} — falls back to latest,
     * matching the original default behavior.
     *
     * @param kafkaOffset offset keyword, typically "earliest" or "latest"; may be {@code null}
     * @return the corresponding {@link OffsetsInitializer}
     */
    public static OffsetsInitializer getStartingOffsets(String kafkaOffset) {
        if (kafkaOffset == null) {
            return OffsetsInitializer.latest();
        }
        switch (kafkaOffset.trim().toLowerCase(Locale.ROOT)) {
            case "earliest":
                return OffsetsInitializer.earliest();
            case "latest":
            default:
                return OffsetsInitializer.latest();
        }
    }
}
