/*
 *
 *  * Copyright (c) iwindplus Technologies Co., Ltd.2024-2030, All rights reserved.
 *
 *
 */

package com.iwindplus.base.binlog.manager;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.text.CharSequenceUtil;
import com.iwindplus.base.binlog.domain.property.BinlogProperty;
import com.iwindplus.base.binlog.domain.property.BinlogProperty.TopicHistory;
import com.iwindplus.base.binlog.domain.property.BinlogProperty.TopicOffset;
import com.iwindplus.base.binlog.handler.BinlogProcessHandler;
import com.iwindplus.base.domain.constant.CommonConstant.SymbolConstant;
import com.iwindplus.base.kafka.domain.property.KafkaProperty;
import com.iwindplus.base.kafka.support.KafkaTopicAutoCreator;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.connector.binlog.BinlogConnectorConfig;
import io.debezium.connector.mysql.MySqlConnectorConfig;
import io.debezium.connector.mysql.MySqlConnectorConfig.SnapshotLockingMode;
import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;
import io.debezium.relational.RelationalDatabaseConnectorConfig;
import jakarta.annotation.Resource;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.SmartLifecycle;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

/**
 * Binlog 引擎管理器.
 *
 * @author zengdegui
 * @since 2025/11/22 00:02
 */
@Slf4j
public class BinlogEngineManager implements SmartLifecycle {

    /** Delay before restarting a completed/failed engine, in milliseconds. */
    private static final long RESTART_DELAY_MILLIS = 5 * 1000L;

    @Resource
    private BinlogProperty property;

    @Resource
    private KafkaProperties kafkaProperties;

    @Resource
    private AdminClient adminClient;

    @Resource
    private BinlogProcessHandler handler;

    @Resource
    private ThreadPoolTaskExecutor threadPoolTaskExecutor;

    @Resource
    private ThreadPoolTaskScheduler threadPoolTaskScheduler;

    /** Lifecycle flag; guards against double start/stop. */
    private final AtomicBoolean running = new AtomicBoolean(false);

    /** serverId -> running engine future (or pending restart task). */
    private final Map<String, Future<?>> engines = new ConcurrentHashMap<>(16);

    /**
     * Starts the manager: optionally auto-creates the required Kafka topics,
     * then submits one Debezium engine per configured data source.
     */
    @Override
    public void start() {
        // compareAndSet ensures a single transition false -> true.
        if (Boolean.FALSE.equals(property.getEnabled()) || !running.compareAndSet(false, true)) {
            log.warn("Binlog engine disabled or already started.");
            return;
        }

        kafkaTopicAutoCreate()
            .thenMany(Flux.fromIterable(property.getDataSources()))
            .doOnNext(this::submitEngine)
            .doOnComplete(() -> log.info("All binlog engines submitted, count={}", engines.size()))
            .subscribe();
    }

    /**
     * Cancels all running engines and shuts down the executor/scheduler.
     * Idempotent: only the first call after a successful start does work.
     */
    @Override
    public void stop() {
        if (running.compareAndSet(true, false)) {
            // cancel(true) interrupts the engine thread so Debezium shuts down promptly.
            engines.values().parallelStream().forEach(f -> f.cancel(true));
            engines.clear();
            threadPoolTaskExecutor.shutdown();
            threadPoolTaskScheduler.shutdown();
        }
    }

    @Override
    public boolean isRunning() {
        return this.running.get();
    }

    /**
     * Builds a Debezium JSON engine for the given data source and submits it
     * to the task executor.
     *
     * @param ds data source configuration
     * @return future of the running engine task
     */
    private Future<?> buildAndRunEngine(BinlogProperty.DataSource ds) {
        String connectorName = buildConnectorName(ds.getType(), ds.getServerId());

        DebeziumEngine<ChangeEvent<String, String>> engine =
            DebeziumEngine.create(Json.class)
                .using(buildProps(ds))
                .notifying(this::handleEvent)
                .using((success, msg, err) -> {
                    if (err != null) {
                        log.error("Binlog engine {} completed with error: {}", connectorName, msg, err);
                    } else {
                        log.info("Binlog engine {} completed: {}", connectorName, msg);
                    }
                    // Self-restart after the engine terminates (for any reason).
                    scheduleRestart(ds);
                })
                .build();

        return threadPoolTaskExecutor.submit(engine);
    }

    /**
     * Registers and starts an engine for the data source unless one is
     * already tracked under its serverId.
     *
     * @param ds data source configuration
     */
    private void submitEngine(BinlogProperty.DataSource ds) {
        engines.computeIfAbsent(ds.getServerId(), k -> {
            log.info("submit binlog engine, serverId={}", k);
            return buildAndRunEngine(ds);
        });
    }

    /**
     * Schedules a delayed restart of the engine for the given data source.
     * No-op when the manager is stopping.
     *
     * @param ds data source configuration
     */
    private void scheduleRestart(BinlogProperty.DataSource ds) {
        if (!running.get()) {
            return;
        }

        // The completed engine's future is still mapped under this key; remove it
        // first, otherwise computeIfAbsent would skip registering the restart task.
        engines.remove(ds.getServerId());
        engines.computeIfAbsent(ds.getServerId(), k ->
            // schedule(Runnable, Instant) takes an absolute start time, so the
            // delay must be added to "now" (not interpreted as an epoch offset).
            threadPoolTaskScheduler.schedule(() -> {
                engines.remove(k);
                log.info("Restarting binlog engine for serverId={}", ds.getServerId());
                submitEngine(ds);
            }, Instant.now().plusMillis(RESTART_DELAY_MILLIS)));
    }

    /**
     * Forwards a non-blank change event payload to the process handler.
     *
     * @param event Debezium change event (JSON key/value)
     */
    private void handleEvent(ChangeEvent<String, String> event) {
        final String value = event.value();
        if (CharSequenceUtil.isBlank(value)) {
            return;
        }

        handler.processHandler(value);
    }

    /**
     * Assembles the Debezium/Kafka connector properties for one data source.
     *
     * @param ds data source configuration
     * @return connector properties
     */
    private Properties buildProps(BinlogProperty.DataSource ds) {
        // bootstrap.servers accepts a comma-separated list; use every configured
        // broker rather than only the first for fault tolerance.
        final String bootstrapServer = String.join(",", kafkaProperties.getBootstrapServers());
        final TopicOffset offset = property.getOffset();
        final TopicHistory history = property.getHistory();

        Properties p = new Properties();
        p.setProperty("name", buildConnectorName(ds.getType(), ds.getServerId()));
        p.setProperty("connector.class", "io.debezium.connector.mysql.MySqlConnector");
        p.setProperty("bootstrap.servers", bootstrapServer);
        p.setProperty("offset.storage", "org.apache.kafka.connect.storage.KafkaOffsetBackingStore");
        p.setProperty("offset.storage.topic", offset.getTopicName());
        p.setProperty("offset.storage.partitions", String.valueOf(offset.getNumPartitions()));
        p.setProperty("offset.storage.replication.factor", String.valueOf(offset.getReplicationFactor()));
        p.setProperty("schema.history.internal.kafka.bootstrap.servers", bootstrapServer);
        p.setProperty("schema.history.internal.kafka.topic", history.getTopicName());
        p.setProperty("schema.history.internal.consumer.override.isolation.level", "read_committed");
        p.setProperty("offset.flush.interval.ms", "5000");
        p.setProperty("offset.flush.timeout.ms", "60000");

        p.setProperty(CommonConnectorConfig.TOPIC_PREFIX.name(), buildTopic(property.getTopicPrefix(), ds.getServerId()));
        p.setProperty(RelationalDatabaseConnectorConfig.HOSTNAME.name(), ds.getHost());
        p.setProperty(BinlogConnectorConfig.PORT.name(), String.valueOf(ds.getPort()));
        p.setProperty(RelationalDatabaseConnectorConfig.USER.name(), ds.getUsername());
        p.setProperty(RelationalDatabaseConnectorConfig.PASSWORD.name(), ds.getPassword());
        p.setProperty(BinlogConnectorConfig.SERVER_ID.name(), ds.getServerId());

        Optional.ofNullable(ds.getDatabaseIncludeList()).filter(CharSequenceUtil::isNotBlank)
            .ifPresent(v -> p.setProperty(MySqlConnectorConfig.DATABASE_INCLUDE_LIST.name(), v));
        Optional.ofNullable(ds.getDatabaseExcludeList()).filter(CharSequenceUtil::isNotBlank)
            .ifPresent(v -> p.setProperty(MySqlConnectorConfig.DATABASE_EXCLUDE_LIST.name(), v));
        Optional.ofNullable(ds.getTableIncludeList()).filter(CharSequenceUtil::isNotBlank)
            .ifPresent(v -> p.setProperty(MySqlConnectorConfig.TABLE_INCLUDE_LIST.name(), v));
        Optional.ofNullable(ds.getTableExcludeList()).filter(CharSequenceUtil::isNotBlank)
            .ifPresent(v -> p.setProperty(MySqlConnectorConfig.TABLE_EXCLUDE_LIST.name(), v));
        // Take a full snapshot on startup, then switch to streaming.
        p.setProperty(MySqlConnectorConfig.SNAPSHOT_MODE.name(), property.getSnapshotMode().name());
        // Lock mode: read-lock only the tables involved in the snapshot.
        p.setProperty(MySqlConnectorConfig.SNAPSHOT_LOCKING_MODE.name(), SnapshotLockingMode.MINIMAL.name());

        // Snapshot parallelism.
        p.setProperty("snapshot.parallelism", "8");
        // Disable keep-alive to reduce system CPU.
        p.setProperty("connect.keep.alive", "false");
        // Skip schema-change (DDL) events to cut event volume.
        p.setProperty("include.schema.changes", "false");
        // Do not fetch old values for LOB columns.
        p.setProperty("lob.enabled", "false");
        // Throughput tuning.
        p.setProperty("max.batch.size", "32768");
        p.setProperty("max.queue.size", "131072");
        p.setProperty("poll.interval.ms", "50");
        return p;
    }

    /**
     * Creates the Kafka topics required by the engines, when dynamic
     * registration is enabled; otherwise completes immediately.
     *
     * @return completion signal
     */
    private Mono<Void> kafkaTopicAutoCreate() {
        if (Boolean.FALSE.equals(property.getEnabledDynamicRegister())) {
            return Mono.empty();
        }

        return Mono.fromRunnable(() ->
            KafkaTopicAutoCreator.createTopicsIfAbsent(adminClient, buildTopicConfigs(), 10L)
        ).then();
    }

    /**
     * Builds the topic configurations: one data topic per data source plus the
     * shared offset and schema-history topics.
     *
     * @return mutable list of topic configurations
     */
    private List<KafkaProperty.Config> buildTopicConfigs() {
        final String topicPrefix = property.getTopicPrefix();
        // Collected into a mutable list so the fixed topics can be appended below.
        List<KafkaProperty.Config> list = property.getDataSources()
            .stream()
            .map(ds -> {
                KafkaProperty.Config c = new KafkaProperty.Config();
                c.setTopicName(buildTopic(topicPrefix, ds.getServerId()));
                return c;
            })
            .collect(Collectors.toCollection(() -> new ArrayList<>(10)));

        // Offset topic: compacted so only the latest offset per key is retained.
        TopicOffset offset = property.getOffset();
        KafkaProperty.Config offsetCfg = new KafkaProperty.Config();
        offsetCfg.setTopicName(offset.getTopicName());
        offsetCfg.setNumPartitions(offset.getNumPartitions());
        offsetCfg.setReplicationFactor(offset.getReplicationFactor());
        offsetCfg.setArguments(Map.of(
            "cleanup.policy", "compact",
            "retention.ms", "-1"
        ));
        list.add(offsetCfg);

        // Schema-history topic.
        // NOTE(review): the delete policy is paired with retention.ms=-1, which
        // disables time-based expiry — confirm whether a finite retention was intended.
        TopicHistory history = property.getHistory();
        KafkaProperty.Config historyCfg = new KafkaProperty.Config();
        historyCfg.setTopicName(history.getTopicName());
        historyCfg.setNumPartitions(history.getNumPartitions());
        historyCfg.setReplicationFactor(history.getReplicationFactor());
        historyCfg.setArguments(Map.of(
            "cleanup.policy", "delete",
            "retention.ms", "-1"
        ));
        list.add(historyCfg);
        return list;
    }

    /**
     * Builds a data topic name: {@code <prefix>-<serverId>}.
     *
     * @param topicPrefix configured topic prefix
     * @param serverId data source server id
     * @return topic name
     */
    private String buildTopic(String topicPrefix, String serverId) {
        return topicPrefix + SymbolConstant.HORIZONTAL_LINE + serverId;
    }

    /**
     * Builds a connector name: {@code <type>-<serverId>}.
     *
     * @param type data source type
     * @param serverId data source server id
     * @return connector name
     */
    private String buildConnectorName(String type, String serverId) {
        return type + SymbolConstant.HORIZONTAL_LINE + serverId;
    }
}
