package com.showdor.springboot.jingqing;

import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * @author showdor
 * @email chuanqi@outlook.com
 * @date 2025/3/11
 */

/**
 * A {@link FlinkKafkaConsumer} wrapper that retries the consume loop with
 * exponential backoff when the underlying consumer fails, giving up after
 * {@link #MAX_RETRIES} consecutive failures.
 *
 * <p>Thread-safety: {@code cancel()} may be called from another thread; the
 * {@code running} flag is {@code volatile} for that reason.
 */
public class ResilientKafkaConsumer<T> extends FlinkKafkaConsumer<T> {
    private static final Logger LOG = LoggerFactory.getLogger(ResilientKafkaConsumer.class);
    /** Maximum number of consecutive failed attempts before giving up. */
    private static final int MAX_RETRIES = 5;
    /** Base retry interval in milliseconds; doubled on each consecutive failure. */
    private static final long RETRY_INTERVAL_MS = 10000;

    /** Topic name (comma-joined when constructed from a topic list), used for logging. */
    private final String topic;
    private volatile boolean running = true;
    /** Consecutive failure count; reset at the start of each {@link #run} invocation. */
    private int retryCount = 0;

    // Constructor overloads mirroring the parent class's constructors.
    public ResilientKafkaConsumer(String topic, DeserializationSchema<T> deserializer, Properties props) {
        super(topic, deserializer, props);
        this.topic = topic;
    }

    public ResilientKafkaConsumer(String topic, KafkaDeserializationSchema<T> deserializer, Properties props) {
        super(topic, deserializer, props);
        this.topic = topic;
    }

    public ResilientKafkaConsumer(List<String> topics, DeserializationSchema<T> deserializer, Properties props) {
        super(topics, deserializer, props);
        this.topic = String.join(",", topics);
    }

    /**
     * Runs the consume loop, retrying failed attempts with exponential backoff.
     *
     * @param sourceContext Flink source context to emit records into
     * @throws Exception if the retry budget is exhausted (as a
     *         {@code RuntimeException} carrying the last failure as its cause)
     *         or if the backoff sleep is interrupted
     */
    @Override
    public void run(SourceContext<T> sourceContext) throws Exception {
        retryCount = 0;
        while (running) {
            try {
                LOG.info("开始消费主题: {} | 第 {} 次尝试", topic, retryCount + 1);
                super.run(sourceContext);
                // super.run() returning normally means the source finished;
                // exit instead of restarting consumption from scratch.
                break;
            } catch (WakeupException we) {
                if (!running) {
                    break; // expected wakeup during cancel() — normal termination
                }
                LOG.warn("消费者被意外唤醒", we);
            } catch (InterruptedException ie) {
                // Restore the interrupt status so the task framework sees it.
                Thread.currentThread().interrupt();
                throw ie;
            } catch (Exception ex) {
                // Count the failure exactly once (the original code incremented
                // both before the attempt and in this catch, halving the budget).
                retryCount++;
                if (retryCount >= MAX_RETRIES) {
                    LOG.error("主题 {} 达到最大重试次数 {} 次，终止消费", topic, MAX_RETRIES);
                    // Preserve the last failure as the cause for diagnostics.
                    throw new RuntimeException("Kafka连接永久失败", ex);
                }
                handleConnectionFailure(ex, retryCount);
            }
        }
    }

    /**
     * Logs the failure and sleeps with exponential backoff before the next attempt.
     *
     * @param ex      the failure that triggered this retry
     * @param attempt the 1-based count of consecutive failures so far
     * @throws InterruptedException if the backoff sleep is interrupted (the
     *         thread's interrupt status is restored before rethrowing)
     */
    private void handleConnectionFailure(Exception ex, int attempt) throws InterruptedException {
        // 10s, 20s, 40s, ... — exponential backoff keyed on the attempt number.
        long sleepTime = RETRY_INTERVAL_MS * (1L << (attempt - 1));
        LOG.warn("【网络异常】主题: {} | 原因: {} | 正在进行第 {} 次重试，{} 秒后重试...",
                topic, ex.getMessage(), attempt, sleepTime / 1000, ex);

        try {
            TimeUnit.MILLISECONDS.sleep(sleepTime);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // keep the interrupt visible to callers
            throw ie;
        }
        resetConsumer();
    }

    /**
     * Hook invoked after the backoff sleep, before the next consume attempt.
     * NOTE(review): currently a no-op — the previous log message claimed an old
     * consumer instance was "released", but nothing is actually released here.
     * If real cleanup is needed before a retry, implement it in this method.
     */
    private void resetConsumer() {
        LOG.info("准备重新尝试消费 | 主题: {}", topic);
    }

    /**
     * Stops the retry loop and cancels the underlying Flink Kafka consumer.
     */
    @Override
    public void cancel() {
        running = false;
        super.cancel();
        LOG.info("消费者任务已终止 | 主题: {}", topic);
    }
}