package com.whz.bus.debezium;

import com.whz.bus.data.NacosOffsetStorage;
import com.whz.bus.util.JdbcUrlParser;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.config.Configuration;
import io.debezium.embedded.EmbeddedEngine;
import io.debezium.relational.RelationalDatabaseConnectorConfig;
import io.debezium.relational.history.DatabaseHistory;
import io.debezium.relational.history.FileDatabaseHistory;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.connect.runtime.standalone.StandaloneConfig;
import org.apache.kafka.connect.storage.FileOffsetBackingStore;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import tech.tongyu.common.utils.SequenceUtil;
import tech.tongyu.common.utils.web.IpUtils;

import java.util.Properties;

/**
 * Assembles the Debezium embedded-engine {@link Configuration} from the application's
 * {@link DebeziumProperties} and the active {@link DataSourceWrapper}.
 *
 * <p>Responsibilities:
 * <ul>
 *   <li>global engine settings (connector class, engine name, offset storage);</li>
 *   <li>database connection settings derived from the data source;</li>
 *   <li>MySQL- or PostgreSQL-specific include lists;</li>
 *   <li>schema-history persistence settings.</li>
 * </ul>
 */
@Slf4j
public class DebeziumConfigurationWrapper {

    /** Schema-history file, relative to {@link #getHome()} (used in standalone mode). */
    public static final String HISTORY_DB_DAT = "/logs/debezium/history/db.log";
    /** Offset-storage file, relative to {@link #getHome()} (used in standalone mode). */
    public static final String OFFSET_DATA_LOG = "/logs/debezium/offset/data.log";
    /** Prefix for the unique embedded-engine instance name. */
    public static final String TY_DBZ_ENGINE_NAME = "ty_engine_";
    /** Prefix for the unique logical database server name. */
    public static final String TY_DBZ_SERVER_NAME = "ty_server_";
    /** Separator used by Debezium for multi-valued include lists. */
    public static final String COMMA = ",";

    private final DebeziumProperties debeziumProperties;
    private final DataSourceWrapper dataSourceWrapper;

    public DebeziumConfigurationWrapper(DebeziumProperties debeziumProperties, DataSourceWrapper dataSourceWrapper) {
        Assert.notNull(debeziumProperties, "debeziumProperties is null");
        // Fixed: failure message was previously the truncated "dataSourceWrapper ".
        Assert.notNull(dataSourceWrapper, "dataSourceWrapper is null");

        this.debeziumProperties = debeziumProperties;
        this.dataSourceWrapper = dataSourceWrapper;
    }

    /**
     * Builds the complete connector configuration; see {@link CommonConnectorConfig}
     * for the common option set.
     *
     * @return the assembled {@link Configuration}
     */
    public Configuration debeziumConfig() {

        // Debezium connector / engine-level configuration.
        final Properties properties = debeziumGlobalConfig();

        // Database connection configuration.
        monitorDataBase(properties);

        if (dataSourceWrapper.isMySql()) {
            // MySQL-specific configuration.
            mysqlOnly(properties);
        }
        if (dataSourceWrapper.isPG()) {
            // PostgreSQL-specific configuration.
            pgOnly(properties);
        }

        // Persistence of historical table-schema changes.
        debeziumHistory(properties);

        return Configuration.from(properties);
    }

    /** Applies PostgreSQL-specific options (schema/database/table include lists). */
    private void pgOnly(final Properties properties) {
        properties.put(RelationalDatabaseConnectorConfig.SERVER_NAME.name(), getUniqueServerName());
        // MySQL captures databases; PostgreSQL captures schemas.
        DebeziumProperties.Postgres postgres = debeziumProperties.getPostgres();
        // PostgreSQL: schemas to capture.
        properties.put(RelationalDatabaseConnectorConfig.SCHEMA_INCLUDE_LIST.name(),
                       String.join(COMMA, postgres.getSchema()));
        // Presumably only a single database can be captured per connector.
        properties.put(RelationalDatabaseConnectorConfig.DATABASE_NAME.name(),
                       String.join(COMMA, postgres.getDatabase()));
        // Tables to capture; multiple entries are comma-separated.
        properties.put(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name(),
                       String.join(COMMA, postgres.getTables()));
    }

    /** Applies MySQL-specific options (database/table include lists, server id). */
    private void mysqlOnly(final Properties properties) {
        // MySQL captures databases; PostgreSQL captures schemas.
        DebeziumProperties.Mysql mysql = debeziumProperties.getMysql();

        properties.put(RelationalDatabaseConnectorConfig.DATABASE_INCLUDE_LIST.name(),
                       String.join(COMMA, mysql.getDatabase()));
        // Unique id within a MySQL cluster; arbitrary for a standalone MySQL.
        // On a duplicate server_id MySQL drops the old connection in favor of the new
        // one, so clustered deployments may observe duplicated binlog events.
        // String.valueOf: java.util.Properties is specified for String values only.
        properties.put("database.server.id", String.valueOf(mysql.getServerId()));
        // Unique Debezium connector name; must be unique within the database cluster.
        properties.put(RelationalDatabaseConnectorConfig.SERVER_NAME.name(), getUniqueServerName());
        // Tables to capture; multiple entries are comma-separated.
        properties.put(RelationalDatabaseConnectorConfig.TABLE_INCLUDE_LIST.name(),
                       String.join(COMMA, mysql.getTables()));
    }

    /**
     * Builds a unique logical server name from the local IP and a generated sequence id.
     *
     * @return the unique server name
     */
    private String getUniqueServerName() {
        String name = TY_DBZ_SERVER_NAME + IpUtils.getLocalIpNum() + "_" + SequenceUtil.makeStringId();
        log.info("Unique name that identifies the database server and all recorded offsets:[{}]", name);
        return name;
    }

    /** Configures file-based schema-history storage and DDL-handling flags. */
    private void debeziumHistory(Properties properties) {
        properties.put("database.history", FileDatabaseHistory.class.getName());
        properties.put(FileDatabaseHistory.FILE_PATH.name(), getHistoryPath());
        // Tolerate DDL statements the parser cannot understand instead of failing.
        properties.put(DatabaseHistory.SKIP_UNPARSEABLE_DDL_STATEMENTS.name(), "true");
        // Record DDL only for the captured tables to keep the history file small.
        properties.put(DatabaseHistory.STORE_ONLY_CAPTURED_TABLES_DDL.name(), "true");
    }

    /**
     * Resolves the schema-history file path ({@code database.history.file.filename}).
     *
     * @return absolute path of the schema-history file
     */
    private String getHistoryPath() {
        String path = getHome() + HISTORY_DB_DAT;
        log.info("保存数据库历史,database.history.file.filename的path:[{}]", path);
        return path;
    }

    /** Applies connection settings (host/port/credentials) parsed from the JDBC URL. */
    private void monitorDataBase(Properties properties) {
        JdbcUrlParser jdbcUrlParser = new JdbcUrlParser(dataSourceWrapper.getDataSourceProperties().determineUrl());
        properties.put(RelationalDatabaseConnectorConfig.HOSTNAME.name(), jdbcUrlParser.getHost());
        // String.valueOf: java.util.Properties is specified for String values only.
        properties.put(RelationalDatabaseConnectorConfig.PORT.name(), String.valueOf(jdbcUrlParser.getPort()));
        properties.put(RelationalDatabaseConnectorConfig.USER.name(),
                       dataSourceWrapper.getDataSourceProperties().getUsername());
        properties.put(RelationalDatabaseConnectorConfig.PASSWORD.name(),
                       dataSourceWrapper.getDataSourceProperties().getPassword());
        // When only data changes are monitored, do not emit schema-change events.
        properties.put(RelationalDatabaseConnectorConfig.INCLUDE_SCHEMA_CHANGES.name(),
                       debeziumProperties.isOnlyMonitorDataChanged() ? "false" : "true");
    }

    /**
     * Builds the engine-level configuration: connector class, engine name and
     * offset storage (Nacos-backed in cluster mode, file-backed in standalone mode).
     *
     * @return properties pre-populated with the global engine settings
     */
    private Properties debeziumGlobalConfig() {
        Properties properties = new Properties();
        properties.put(EmbeddedEngine.CONNECTOR_CLASS.name(), dataSourceWrapper.getConnector());
        properties.put(EmbeddedEngine.ENGINE_NAME.name(), getUniqueEngineName());

        // Offset-storage configuration.
        // String.valueOf: java.util.Properties is specified for String values only.
        properties.put(EmbeddedEngine.OFFSET_FLUSH_INTERVAL_MS.name(),
                       String.valueOf(debeziumProperties.getCluster().getOffsetStore().getIntervalMs()));
        if (debeziumProperties.getCluster().isEnable()) {
            // Cluster deployment: offsets are stored in Nacos. Writes from multiple
            // nodes may overwrite each other, so a restart can replay duplicate data.
            properties.put(EmbeddedEngine.OFFSET_STORAGE.name(),
                           NacosOffsetStorage.class.getName());
            properties.put(NacosOffsetStorage.OffSET_DATA_ID,
                           debeziumProperties.getCluster().getOffsetStore().getDataId());
            properties.put(NacosOffsetStorage.OffSET_GROUP,
                           debeziumProperties.getCluster().getOffsetStore().getGroup());
        } else {
            // Standalone deployment: offsets are stored in a local file.
            properties.put(EmbeddedEngine.OFFSET_STORAGE.name(), FileOffsetBackingStore.class.getName());
            properties.put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, getOffsetFilePath());
        }
        // Kafka-connect queue settings; must be > 0 or no data is consumed.
        properties.put(CommonConnectorConfig.MAX_QUEUE_SIZE_IN_BYTES.name(),
                       String.valueOf(1 << 24)); // max queue size is 16 MiB
        properties.put(CommonConnectorConfig.QUERY_FETCH_SIZE.name(),
                       String.valueOf(1 << 10)); // max number of records held in memory
        return properties;
    }

    /**
     * Builds a unique engine-instance name from the local address and a generated
     * sequence id.
     *
     * @return the unique engine name
     */
    private String getUniqueEngineName() {
        String name = TY_DBZ_ENGINE_NAME + IpUtils.getLocalAddress() + "_" + SequenceUtil.makeStringId();
        log.info("Unique name for this connector instance:[{}]", name);
        return name;
    }

    /**
     * Resolves the standalone offset file path ({@code offset.storage.file.filename}).
     *
     * @return absolute path of the offset file
     */
    private String getOffsetFilePath() {
        String path = getHome() + OFFSET_DATA_LOG;
        log.info("非集群配置的offset.storage.file.filename:{}", path);
        return path;
    }

    /**
     * Resolves the base directory for Debezium data files: the {@code HOME} system
     * property, then the {@code HOME} environment variable, falling back to
     * {@code /var}.
     *
     * @return base directory path, never blank
     */
    public String getHome() {
        // NOTE(review): the standard JVM property is "user.home"; "HOME" is only set
        // if passed explicitly via -DHOME=... — confirm this is intentional.
        String home = System.getProperty("HOME");
        if (StringUtils.hasText(home)) {
            return home;
        }
        home = System.getenv("HOME");
        if (StringUtils.hasText(home)) {
            return home;
        }
        return "/var";
    }

}