package com.darcytech.debezium.core.work;

import com.darcytech.debezium.core.kafka.connect.KafkaConnectClient;
import com.darcytech.debezium.core.kafka.connect.dto.ConnectorTask;
import com.darcytech.debezium.core.kafka.connect.model.Config;
import com.darcytech.debezium.core.kafka.connect.model.Task;
import com.darcytech.debezium.core.pool.ConnectionWrap;
import com.darcytech.debezium.core.pool.ConnectorConnectionBinding;
import com.darcytech.debezium.core.pool.ConnectorInfo;
import com.darcytech.debezium.core.pool.MetaData;
import com.darcytech.debezium.core.pool.RegisterGroup;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RegisterExecutor {

    private static final Logger LOGGER = LoggerFactory.getLogger(RegisterExecutor.class);

    /** A connector is abandoned (until the next statistics reset) after this many repair attempts. */
    private static final int MAX_REPAIR_ATTEMPTS = 3;

    /** The repair statistics window is cleared once every this many invocations of needReset(). */
    private static final int RESET_INTERVAL = 5;

    private final KafkaConnectClient kafkaConnectClient;

    private final AdminClient kafkaAdminClient;

    private final ConnectorConnectionBinding connectorConnectionBinding;

    // Rolling invocation counter for needReset(); wraps back to 0 at 1000.
    private int repairTime = 0;

    // Per-connector repair attempt counts; periodically cleared (see needReset()).
    private final Map<String, Integer> hostRepairStatistics = new HashMap<>();

    /**
     * Creates an executor bound to the given Kafka cluster and Kafka Connect REST endpoints.
     *
     * @param kafkaServers               comma-separated Kafka bootstrap servers
     * @param kafkaConnectAddresses      comma-separated Kafka Connect REST addresses
     * @param connectorConnectionBinding registry that binds connector configs to database connections
     */
    public RegisterExecutor(String kafkaServers, String kafkaConnectAddresses, ConnectorConnectionBinding connectorConnectionBinding) {
        this.kafkaConnectClient = new KafkaConnectClient(Arrays.asList(kafkaConnectAddresses.split(",")));
        this.connectorConnectionBinding = connectorConnectionBinding;

        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        this.kafkaAdminClient = AdminClient.create(props);
    }

    /**
     * Pulls every connector currently known to Kafka Connect: deletes those whose task
     * state is not RUNNING and registers the config of the healthy ones with the
     * connection binding. Connectors with no retrievable status are skipped.
     *
     * @throws RuntimeException wrapping any failure during the pull
     */
    public void pullALLConnectors() {
        try {
            List<String> connectors = kafkaConnectClient.getConnectors();
            for (String connector : connectors) {
                Task taskStatus = kafkaConnectClient.getTaskStatus(connector);
                if (taskStatus == null || taskStatus.getState() == null) {
                    continue;
                }
                if (!taskStatus.getState().equals(Task.RUNNING)) {
                    kafkaConnectClient.deleteConnector(connector);
                } else {
                    ConnectorTask configAndTasks = kafkaConnectClient.getConfigAndTasks(connector);
                    if (configAndTasks == null) {
                        continue;
                    }
                    Config config = configAndTasks.getConfig();
                    this.connectorConnectionBinding.registerInfo(new ConnectorInfo(config));
                }
            }

        } catch (Throwable t) {
            throw new RuntimeException("Failed to initialize registered executor", t);
        }
    }

    /**
     * Attempts to re-register connectors whose task is in a non-RUNNING state, provided the
     * underlying connection is still healthy. Each connector is retried at most
     * {@link #MAX_REPAIR_ATTEMPTS} times per statistics window (the window is cleared
     * periodically via {@link #needReset()}). Errors are logged rather than propagated —
     * this method is expected to be invoked repeatedly from a scheduler.
     *
     * @param metaDataMap connector name -> metadata (connection wrapper + original connector info)
     */
    public void doRepairConnectorFailed(Map<String, MetaData> metaDataMap) {
        try {
            if (needReset()) {
                hostRepairStatistics.clear();
            }
            List<String> connectors = kafkaConnectClient.getConnectors();
            for (String connector : connectors) {
                Task taskStatus = kafkaConnectClient.getTaskStatus(connector);
                if (!isNotRunning(taskStatus)) {
                    continue;
                }
                // Single lookup instead of the original double get().
                Integer attempts = hostRepairStatistics.get(connector);
                if (attempts != null && attempts > MAX_REPAIR_ATTEMPTS) {
                    LOGGER.error("connector:{} repaired too many times, giving up until next reset window", connector);
                    continue;
                }
                MetaData metaData = metaDataMap.get(connector);
                if (metaData != null && metaData.getConnectionWrap().getHealthy()) {
                    hostRepairStatistics.merge(connector, 1, Integer::sum);
                    this.connectorConnectionBinding.registerInfo(metaData.getConnectorInfo());
                }
            }
        } catch (Throwable t) {
            LOGGER.error("doRepairConnectorFailed error:", t);
        }
    }

    /**
     * Registers the connectors of a group with Kafka Connect and creates the Kafka topics
     * they will write to. Stale failed connectors belonging to the group are removed first.
     *
     * @param registerGroup the group of connector configs to register
     * @param metaDataMap   currently tracked connectors; failed connectors present here are
     *                      kept for repair instead of being deleted
     * @return {@code true} if every connector was registered successfully, {@code false} otherwise
     */
    public boolean registerKafkaConnector(RegisterGroup registerGroup, Map<String, MetaData> metaDataMap) {

        try {

            List<String> connectors = kafkaConnectClient.getConnectors();

            clearFailedConnectors(registerGroup, metaDataMap, connectors);

            return doRegisterConnectorsAndCreateTopic(registerGroup, connectors);

        } catch (Throwable t) {
            LOGGER.error("registerKafkaConnector error: ", t);
            return false;
        }

    }

    /**
     * Registers (or re-registers) each config of the group, deleting any existing
     * non-RUNNING instance first, and asynchronously creates the topics for connectors
     * that were not previously known.
     *
     * @return {@code true} if every putConnector call succeeded
     */
    private boolean doRegisterConnectorsAndCreateTopic(RegisterGroup registerGroup, List<String> connectors) throws IOException {
        boolean success = true;
        Set<String> tables = new HashSet<>();

        for (Config config : registerGroup.getConfigs()) {

            String connector = config.getName();
            if (!connectors.contains(connector)) {
                collectTopics(config, tables);
            }

            Task taskStatus = kafkaConnectClient.getTaskStatus(connector);
            if (isNotRunning(taskStatus)) {
                kafkaConnectClient.deleteConnector(connector);
            }
            if (!kafkaConnectClient.putConnector(connector, config)) {
                success = false;
            }
        }

        createTopics(tables);

        return success;
    }

    /**
     * Derives topic names ({@code <serverName>-<table>}) from a config's comma-separated
     * table include list and adds them to {@code topics}. Entries that are not in
     * {@code <database>.<table>} form are skipped with a warning (the original code threw
     * ArrayIndexOutOfBoundsException on such entries).
     */
    private void collectTopics(Config config, Set<String> topics) {
        for (String dataBaseTable : config.getTableIncludeList().split(",")) {
            String[] parts = dataBaseTable.split("\\.");
            if (parts.length < 2) {
                LOGGER.warn("table include entry '{}' is not in <database>.<table> form, skipping", dataBaseTable);
                continue;
            }
            topics.add(topicPrefix(config.getDataBaseServerName()) + parts[1]);
        }
    }

    /**
     * Asynchronously creates 1-partition, replication-factor-1 topics for the given names.
     * The original code discarded the async result, so creation failures (authorization,
     * invalid replication factor, ...) vanished silently; they are now logged.
     * NOTE(review): whenComplete on KafkaFuture requires kafka-clients 2.5+ — confirm the
     * client version in use.
     */
    private void createTopics(Set<String> tables) {
        if (tables.isEmpty()) {
            return;
        }
        int numPartitions = 1;
        short replicationFactor = 1;

        List<NewTopic> newTopics = tables.stream()
                .map(table -> new NewTopic(table, numPartitions, replicationFactor))
                .collect(Collectors.toList());
        this.kafkaAdminClient.createTopics(newTopics).all().whenComplete((ignored, error) -> {
            if (error != null) {
                LOGGER.error("Failed to create topics {}", tables, error);
            }
        });
    }

    /**
     * Deletes connectors of this group that are in a failed state and are no longer
     * tracked in {@code metaDataMap} (tracked failed connectors are left for repair).
     */
    private void clearFailedConnectors(RegisterGroup registerGroup, Map<String, MetaData> metaDataMap, List<String> connectors) throws IOException {
        for (String connector : connectors) {
            if (connector.startsWith(registerGroup.getGroup())) {

                Task taskStatus = kafkaConnectClient.getTaskStatus(connector);

                if (isNotRunning(taskStatus) && !metaDataMap.containsKey(connector)) {

                    kafkaConnectClient.deleteConnector(connector);
                }
            }
        }
    }

    /** True when a status was retrieved and its state is known and not RUNNING. */
    private boolean isNotRunning(Task taskStatus) {
        return taskStatus != null && taskStatus.getState() != null && !taskStatus.getState().equals(Task.RUNNING);
    }

    /**
     * Advances the rolling counter and reports whether the repair statistics should be
     * cleared: true once every {@link #RESET_INTERVAL} calls; the counter wraps at 1000.
     * NOTE(review): because the statistics are cleared this frequently, MAX_REPAIR_ATTEMPTS
     * only caps attempts per window, not overall — confirm this is the intended policy.
     */
    private boolean needReset() {
        if (repairTime >= 1000) {
            repairTime = 0;
        }
        repairTime++;
        return repairTime % RESET_INTERVAL == 0;
    }

    /** Topic name prefix for a database server/group name, e.g. "orders" -> "orders-". */
    private String topicPrefix(String group) {
        return group + "-";
    }

}
