package com.yl.flink.source;

import cn.hutool.core.collection.ListUtil;
import cn.hutool.core.map.MapUtil;
import com.yl.constant.Const;
import com.yl.util.ParamUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.rabbitmq.RMQSource;
import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.util.Map;
import java.util.Optional;

/**
 * @author wlf
 * @since 2022/8/18
 */
@Slf4j
public class SourceTool {

    /**
     * Builds the configured streaming data source (RabbitMQ or Kafka).
     *
     * <p>The source type is read from {@code Const.FLINK_SOURCE}. If no source
     * could be built (unknown type, or an exception while configuring the
     * connector) this is treated as a fatal configuration error and the JVM
     * exits with status 1.
     *
     * @param env    the Flink execution environment the source is attached to
     * @param params job parameters carrying the source type and connection settings
     * @return the configured source stream (parallelism fixed to 1); never
     *         {@code null} — on failure the process terminates instead
     */
    public static DataStreamSource<String> sourceStream(StreamExecutionEnvironment env, ParameterTool params) {
        DataStreamSource<String> sourceStream = null;
        try {
            String source = params.get(Const.FLINK_SOURCE);
            if (Const.S_RABBITMQ.equals(source)) {
                sourceStream = env
                        .addSource(getRMQSource(params))
                        .setParallelism(1);
            } else if (Const.S_KAFKA.equals(source)) {
                sourceStream = env
                        .fromSource(getKafkaSource(params), WatermarkStrategy.noWatermarks(), Const.S_KAFKA_SOURCE)
                        .setParallelism(1);
            }
        } catch (Exception e) {
            // Log via SLF4J with the full stack trace instead of printStackTrace(),
            // so the failure reaches the configured log appenders and keeps the cause.
            log.error("数据源配置失败！", e);
        }
        Optional
                .ofNullable(sourceStream)
                .ifPresentOrElse(
                        s -> log.info("数据源配置成功！"),
                        () -> {
                            log.error("数据源配置失败！");
                            // Misconfigured source is unrecoverable for this job — fail fast.
                            System.exit(1);
                        });
        return sourceStream;
    }

    /**
     * Creates the RabbitMQ data source.
     *
     * <p>An anonymous subclass overrides {@code setupQueue} so the queue is
     * declared with a message-TTL argument taken from the job parameters;
     * this relies on the connector's protected {@code queueName} and
     * {@code channel} fields.
     *
     * @param params job parameters holding host/port/credentials/virtual host,
     *               the queue name and the message TTL
     * @return a RabbitMQ source emitting each message body as a {@code String}
     */
    private static RMQSource<String> getRMQSource(ParameterTool params) {
        final RMQConnectionConfig connectionConfig = new RMQConnectionConfig.Builder()
                .setHost(params.get(Const.RABBITMQ_HOST))
                .setPort(params.getInt(Const.RABBITMQ_PORT))
                .setUserName(params.get(Const.RABBITMQ_USERNAME))
                .setPassword(params.get(Const.RABBITMQ_PASSWORD))
                .setVirtualHost(params.get(Const.RABBITMQ_VIRTUAL_HOST))
                // NOTE(review): up to 30k unacked messages per consumer — presumably
                // tuned for throughput; confirm against broker memory limits.
                .setPrefetchCount(30_000)
                .build();
        return new RMQSource<>(
                connectionConfig,
                params.get(Const.RABBITMQ_QUEUE_NAME),
                // At-least-once delivery; exactly-once would require enabling
                // checkpointing and passing true here to use correlation IDs.
                false,
                new SimpleStringSchema()) {
            /**
             * If the RabbitMQ queue was created with a TTL, the declaration here
             * must match: durable (second queueDeclare argument) set to true and
             * an x-message-ttl argument in milliseconds (e.g. 259200000 = 3 days).
             */
            @Override
            protected void setupQueue() throws IOException {
                if (queueName != null) {
                    // Single-entry arguments map: TTL key -> TTL value (ms) from
                    // the job parameters. The key presumably resolves to
                    // "x-message-ttl" — TODO confirm ParamUtil.getKeyWithoutPrefix.
                    Map<String, Object> arguments = MapUtil.of(
                            ParamUtil.getKeyWithoutPrefix(Const.RABBITMQ_MESSAGE_TTL),
                            params.getInt(Const.RABBITMQ_MESSAGE_TTL));
                    // queueDeclare(queue, durable, exclusive, autoDelete, arguments)
                    channel.queueDeclare(queueName, true, false, false, arguments);
                }
            }
        };
    }

    /**
     * Creates the Kafka data source for the configured servers, group and topics.
     *
     * @param params job parameters holding bootstrap servers, group id and the
     *               comma-separated consumer topic list
     * @return a Kafka source deserializing each record value as a {@code String}
     */
    private static KafkaSource<String> getKafkaSource(ParameterTool params) {
        String bootstrapServers = params.get(Const.KAFKA_BOOTSTRAP_SERVERS);
        String groupId = params.get(Const.KAFKA_GROUP_ID);
        // The topic parameter is a comma-separated list; split it before handing
        // the individual topic names to the builder.
        String[] topicNames = params.get(Const.KAFKA_CONSUMER_TOPICS).split(Const.PTN_COMMA);
        return KafkaSource
                .<String>builder()
                .setBootstrapServers(bootstrapServers)
                .setGroupId(groupId)
                .setTopics(ListUtil.toList(topicNames))
                .setDeserializer(KafkaRecordDeserializationSchema.valueOnly(StringDeserializer.class))
                // Start from the committed consumer-group offsets; fall back to the
                // earliest offset when no committed offset exists.
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
                // Auto-commit offsets so a restart resumes from the last committed
                // position; when checkpointing is enabled, checkpoints take precedence.
                .setProperty("enable.auto.commit", "true")
                .setProperty("auto.commit.interval.ms", "1000")
                .build();
    }

}
