/*
 *
 *  * Copyright (c) 2022-2022 Huawei Technologies Co.,Ltd.
 *  *
 *  * openGauss is licensed under Mulan PSL v2.
 *  * You can use this software according to the terms and conditions of the Mulan PSL v2.
 *  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 *  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 *  * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *  * See the Mulan PSL v2 for more details.
 *
 */

package org.opengauss.portalcontroller.tools.sqlserver;

import org.apache.logging.log4j.util.Strings;
import org.opengauss.jdbc.PgConnection;
import org.opengauss.portalcontroller.PortalControl;
import org.opengauss.portalcontroller.constant.*;
import org.opengauss.portalcontroller.entity.MigrationConfluentInstanceConfig;
import org.opengauss.portalcontroller.exception.PortalException;
import org.opengauss.portalcontroller.logmonitor.listener.LogFileListener;
import org.opengauss.portalcontroller.software.Confluent;
import org.opengauss.portalcontroller.software.ConnectorSqlserver;
import org.opengauss.portalcontroller.software.Software;
import org.opengauss.portalcontroller.status.ChangeStatusTools;
import org.opengauss.portalcontroller.task.Plan;
import org.opengauss.portalcontroller.task.Task;
import org.opengauss.portalcontroller.tools.Tool;
import org.opengauss.portalcontroller.utils.FileUtils;
import org.opengauss.portalcontroller.utils.InstallMigrationUtils;
import org.opengauss.portalcontroller.utils.JdbcUtils;
import org.opengauss.portalcontroller.utils.KafkaUtils;
import org.opengauss.portalcontroller.utils.LogViewUtils;
import org.opengauss.portalcontroller.utils.ParamsUtils;
import org.opengauss.portalcontroller.utils.ProcessUtils;
import org.opengauss.portalcontroller.utils.PropertitesUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.*;

import static org.opengauss.portalcontroller.PortalControl.toolsConfigParametersTable;
import static org.opengauss.portalcontroller.PortalControl.toolsMigrationParametersTable;
import static org.opengauss.portalcontroller.constant.Debezium.Connector.LOG_PATTERN_PATH;
import static org.opengauss.portalcontroller.utils.ProcessUtils.checkProcess;

/**
 * Drives the SQL Server -&gt; openGauss incremental (CDC-based) migration phase of the
 * portal: prepares the debezium source/sink configuration, starts and supervises the
 * connect-source/connect-sink tasks, and reports migration progress.
 *
 * @date :2023/11/3 15:22
 * @description: IncrementalMigrationTool
 * @version: 1.1
 * @since 1.1
 */
public class IncrementalMigrationTool extends ParamsConfig implements Tool {
    private static final Logger LOGGER = LoggerFactory.getLogger(IncrementalMigrationTool.class);
    private final LogFileListener incrementalLogFileListener = new LogFileListener();

    private final SqlserverFullMigrationTool fullMigrationTool = new SqlserverFullMigrationTool();

    /**
     * Upper bound on the number of tables handled in one migration task.
     */
    public static final Integer MAX_TABLE = 10000;

    // Lists every CDC change table (cdc.<capture_instance>_CT) present in the source database.
    private static final String GET_CDC_TABLE =
            "select concat('cdc.' , [capture_instance] , '_CT') as ct_name from cdc.change_tables";

    // Prefix of the query that reads the raw CDC rows from one change table.
    private static final String GET_CDC_DATA_PREFIX =
            "select [__$start_lsn],[__$seqval],[__$operation] from ";

    private static List<String> currentChangeTableList = null;
    private static List<List<Map<String, String>>> currentLSNLists = null;

    // Parameter maps written into the debezium properties files; created by
    // initConfigChangeParamsMap() before any init*Params() method runs.
    Map<String, Object> sourceMap = null;
    Map<String, Object> sinkMap = null;

    Map<String, Object> sourceConnectMap = null;
    Map<String, Object> sinkConnectMap = null;

    Map<String, Object> logMap = null;

    /**
     * Fills the source/sink maps with the SQL Server and openGauss connection
     * parameters taken from the global migration parameter table.
     */
    @Override
    public void initDataBaseParams() {
        String sqlserverDatabaseHost = toolsMigrationParametersTable.get(Sqlserver.DATABASE_HOST);
        sourceMap.put(Debezium.Source.HOST, sqlserverDatabaseHost);
        String sqlserverDatabasePort = toolsMigrationParametersTable.get(Sqlserver.DATABASE_PORT);
        sourceMap.put(Debezium.Source.PORT, sqlserverDatabasePort);
        String sqlserverUserName = toolsMigrationParametersTable.get(Sqlserver.USER);
        sourceMap.put(Debezium.Source.USER, sqlserverUserName);
        String sqlserverUserPassword = toolsMigrationParametersTable.get(Sqlserver.PASSWORD);
        sourceMap.put(Debezium.Source.PASSWORD, sqlserverUserPassword);
        String sqlserverDatabaseName = toolsMigrationParametersTable.get(Sqlserver.DATABASE_NAME);
        sourceMap.put(Debezium.Source.WHITELIST, sqlserverDatabaseName);
        setSourceTables();
        // Map the SQL Server database name onto the target openGauss schema.
        String openGaussSchemaName = toolsMigrationParametersTable.get(Opengauss.DATABASE_SCHEMA);
        sinkMap.put(Debezium.Sink.SCHEMA_MAPPING, sqlserverDatabaseName + ":" + openGaussSchemaName);
        String opengaussUserName = toolsMigrationParametersTable.get(Opengauss.USER);
        sinkMap.put(Debezium.Sink.Opengauss.USER, opengaussUserName);
        String opengaussUserPassword = toolsMigrationParametersTable.get(Opengauss.PASSWORD);
        sinkMap.put(Debezium.Sink.Opengauss.PASSWORD, opengaussUserPassword);
        String opengaussDatabaseHost = toolsMigrationParametersTable.get(Opengauss.DATABASE_HOST);
        String opengaussDatabasePort = toolsMigrationParametersTable.get(Opengauss.DATABASE_PORT);
        String opengaussDatabaseName = toolsMigrationParametersTable.get(Opengauss.DATABASE_NAME);
        String opengaussDebeziumUrl =
                "jdbc:opengauss://" + opengaussDatabaseHost + ":" + opengaussDatabasePort + "/" + opengaussDatabaseName + "?loggerLevel=OFF";
        sinkMap.put(Debezium.Sink.Opengauss.URL, opengaussDebeziumUrl);
    }

    /**
     * Adds the table white-list to the source configuration, but only when the
     * table rule is enabled for this plan.
     */
    private void setSourceTables() {
        String tables = toolsMigrationParametersTable.get(Sqlserver.DATABASE_TABLE);
        if (!Plan.isRuleEnable(tables)) {
            return;
        }
        sourceMap.put(Debezium.Source.TABLELIST, tables);
    }

    /**
     * Writes the initial snapshot LSN used as the starting offset of the
     * incremental migration into the debezium source configuration file.
     *
     * <p>NOTE(review): the all-zero LSN presumably makes the connector replay
     * from the start of the CDC log — confirm against the connector docs.
     *
     * @throws PortalException if the offset properties file cannot be updated
     */
    private static void findMaxLsn() throws PortalException {
        currentChangeTableList = null;
        currentLSNLists = null;
        LOGGER.info("Find snapshot for full and incremental migration.");
        String maxLsn = "00000000:00000000:0000";
        Hashtable<String, String> maxLsnHashtable = new Hashtable<>();
        maxLsnHashtable.put(Lsn.MAX_LSN, maxLsn);
        String offsetPath = toolsConfigParametersTable.get(Debezium.Source.INCREMENTAL_CONFIG_PATH);
        PropertitesUtils.changePropertiesParameters(maxLsnHashtable, offsetPath);
    }

    /**
     * Queries the names of all CDC change tables in the source database.
     *
     * @param conn an open SQL Server connection
     * @return the change table names (cdc.&lt;capture_instance&gt;_CT)
     * @throws SQLException if the query fails
     */
    private static List<String> getCurrentChangeTableList(Connection conn) throws SQLException {
        List<String> changeTableList = new ArrayList<>();
        // try-with-resources: close statement and result set even on failure
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(GET_CDC_TABLE)) {
            while (rs.next()) {
                changeTableList.add(rs.getString("ct_name"));
            }
        }
        LOGGER.info("current change table list: {}", changeTableList);
        return changeTableList;
    }

    /**
     * Reads the raw CDC rows of every change table in {@code currentChangeTableList}.
     *
     * @param conn an open SQL Server connection
     * @return one list per change table; each entry maps change_lsn/commit_lsn/operation
     * @throws SQLException if any query fails
     */
    private static List<List<Map<String, String>>> currentLSNLists(Connection conn) throws SQLException {
        List<List<Map<String, String>>> lsnLists = new ArrayList<>();
        try (Statement stmt = conn.createStatement()) {
            for (String tableName : currentChangeTableList) {
                List<Map<String, String>> lsnList = new ArrayList<>();
                try (ResultSet rs = stmt.executeQuery(GET_CDC_DATA_PREFIX + tableName)) {
                    while (rs.next()) {
                        Map<String, String> modelMap = new HashMap<>();
                        // Column 1 is [__$start_lsn], 2 is [__$seqval], 3 is [__$operation].
                        modelMap.put("change_lsn", rs.getString(2));
                        modelMap.put("commit_lsn", rs.getString(1));
                        modelMap.put("operation", rs.getString(3));
                        lsnList.add(modelMap);
                    }
                }
                LOGGER.info("current lsn list: {}", lsnList);
                lsnLists.add(lsnList);
            }
        }
        return lsnLists;
    }

    /**
     * Writes the confluent kafka / schema-registry addresses into the connector
     * configuration maps.
     */
    @Override
    public void initKafkaParams() {
        // Parameters written into connect-avro-standalone.properties
        MigrationConfluentInstanceConfig portalConfig = MigrationConfluentInstanceConfig.getInstanceFromPortalConfig();
        Hashtable<String, String> connectAvroStandalonePropChangeParam = new Hashtable<>();
        String schemaRegistryPrefix = "http://";
        connectAvroStandalonePropChangeParam.put("bootstrap.servers", portalConfig.getKafkaIpPort());
        connectAvroStandalonePropChangeParam.put("key.converter.schema.registry.url",
                schemaRegistryPrefix + portalConfig.getSchemaRegistryIpPort());
        connectAvroStandalonePropChangeParam.put("value.converter.schema.registry.url",
                schemaRegistryPrefix + portalConfig.getSchemaRegistryIpPort());
        sourceConnectMap.putAll(connectAvroStandalonePropChangeParam);
        sinkConnectMap.putAll(connectAvroStandalonePropChangeParam);
        // Parameter written into sqlserver-sink.properties
        sinkMap.put("record.breakpoint.kafka.bootstrap.servers", portalConfig.getKafkaIpPort());
        // Parameters written into sqlserver-source.properties
        sourceMap.put("database.history.kafka.bootstrap.servers", portalConfig.getKafkaIpPort());
        sourceMap.put("kafka.bootstrap.server", portalConfig.getKafkaIpPort());
    }

    /**
     * Installs the software packages (confluent and the SQL Server connector)
     * required for incremental migration.
     *
     * @param isDownload whether to download the packages before installing
     * @return true when every package installed successfully, false otherwise
     */
    @Override
    public boolean install(boolean isDownload) {
        ArrayList<Software> softwareArrayList = new ArrayList<>();
        softwareArrayList.add(new Confluent());
        softwareArrayList.add(new ConnectorSqlserver());
        InstallMigrationUtils installMigrationUtils = new InstallMigrationUtils();
        for (Software software : softwareArrayList) {
            try {
                installMigrationUtils.installSingleMigrationSoftware(software, isDownload);
            } catch (PortalException e) {
                LOGGER.error("install failed", e);
                return false;
            }
        }
        LogViewUtils.outputResult(true, Command.Install.Sqlserver.IncrementalMigration.DEFAULT);
        return true;
    }

    /**
     * Fills the workspace-scoped parameters (topic names, status folders, rest
     * port, log level) into the configuration maps.
     *
     * @param workspaceId workspace id used to namespace topics and file names
     */
    @Override
    public void initWorkSpaceParams(String workspaceId) {
        sourceMap.put("name", "sqlserver-source-" + workspaceId);
        sourceMap.put("database.server.name", "sqlserver_server_" + workspaceId);
        sourceMap.put("database.server.id", String.valueOf(ProcessUtils.getCurrentPortalPid()));
        sourceMap.put("database.history.kafka.topic", "sqlserver_server_" + workspaceId + "_history");
        sourceMap.put("transforms.route.regex", "^" + "sqlserver_server_" + workspaceId + "(.*)");
        sourceMap.put("transforms.route.replacement", "sqlserver_server_" + workspaceId + "_topic");
        sourceMap.put("schema.history.internal.kafka.topic", "my-schema-history-topic-" + workspaceId);

        //schema.history.internal.kafka.bootstrap.servers
        sourceMap.put("topic.prefix", workspaceId);

        String incrementalFolder = toolsConfigParametersTable.get(Status.INCREMENTAL_FOLDER);
        sourceMap.put("source.process.file.path", incrementalFolder);
        sourceMap.put("create.count.info.path", incrementalFolder);
        sinkMap.put("name", "sqlserver-sink-" + workspaceId);
        sinkMap.put("topics", "sqlserver_server_" + workspaceId + "_topic");
        sinkMap.put("record.breakpoint.kafka.topic", "sqlserver_bp_" + workspaceId + "_topic");
        try {
            FileUtils.createFile(incrementalFolder, false);
        } catch (PortalException e) {
            e.setRequestInformation("Create incremental migration folder status folder failed.Please ensure the "
                    + "config folder " + incrementalFolder + " is available");
            LOGGER.error(e.toString());
            return;
        }
        sinkMap.put("sink.process.file.path", incrementalFolder);
        sinkMap.put("create.count.info.path", incrementalFolder);

        sinkMap.put("fail.sql.path", incrementalFolder);
        // Propagate a global log level override, when configured, to log4j.
        if (Strings.isNotBlank(toolsMigrationParametersTable
                .get(MigrationParameters.Log.GLOBAL_LOG_LEVEL))) {
            logMap.put("log4j.rootLogger",
                    toolsMigrationParametersTable.get(MigrationParameters.Log.GLOBAL_LOG_LEVEL)
                            .toUpperCase() + ", stdout, connectAppender");
        }
        Hashtable<String, String> hashtable = toolsConfigParametersTable;
        KafkaUtils.changekafkaLogParam(workspaceId + "_source", hashtable.get(LOG_PATTERN_PATH));
        // Pick a free rest port for the source connector from the portal's port range.
        int sourcePort = StartPort.REST_SQLSERVER_SOURCE + PortalControl.portId * 10;
        int port = ParamsUtils.getAvailablePorts(sourcePort, 1, 1000).get(0);
        sourceConnectMap.put("rest.port", String.valueOf(port));
    }

    /**
     * Fills the parameters that link this phase with the previous one:
     * the snapshot LSN and the xlog location produced by the full migration.
     */
    @Override
    void initInteractionParams() {
        if (toolsMigrationParametersTable.containsKey(Lsn.MAX_LSN)) {
            sourceMap.put(Lsn.MAX_LSN, toolsMigrationParametersTable.get(Lsn.MAX_LSN));
        }
        sinkMap.put("xlog.location", toolsConfigParametersTable.get(Status.XLOG_PATH));
    }

    /**
     * Initializes the incremental migration: verifies no other connector is
     * running, records the snapshot offset, writes all configuration files and
     * starts the connect-source task.
     *
     * @param workspaceId workspace id of this migration plan
     * @return true on success, false when another connector runs or the offset
     *         file cannot be written
     */
    @Override
    public boolean init(String workspaceId) {
        if (checkAnotherConnectExists()) {
            LOGGER.error("Another connector is running.Cannot run incremental migration whose workspace id is {} .",
                    workspaceId);
            return false;
        }
        try {
            findMaxLsn();
        } catch (PortalException e) {
            LOGGER.error(e.toString());
            PortalControl.shutDownPortal(e.toString());
            return false;
        }
        LOGGER.info("incremental migration tool start init");
        initConfigChangeParamsMap();
        setAllParams(workspaceId);
        changeAllConfig();
        deleteParamsConifg();
        Task.startTaskMethod(Method.Name.CONNECT_SOURCE, 5000, "", incrementalLogFileListener);
        return true;
    }

    /**
     * Merges the source/sink parameter overrides supplied through the
     * environment into the configuration maps.
     */
    @Override
    public void initParmasFromEnvForAddAndChange() {
        sourceMap.putAll(ParamsUtils.changeToolsPropsParameters(ToolsConfigEnum.DEBEZIUM_SQLSERVER_SOURCE));
        sinkMap.putAll(ParamsUtils.changeToolsPropsParameters(ToolsConfigEnum.DEBEZIUM_SQLSERVER_SINK));
    }

    /**
     * Registers the comma-separated parameter names passed through system
     * properties for deletion from the source/sink configuration files.
     */
    @Override
    public void initParmasFromEnvForDelete() {
        String sqlserverSourceParams = System.getProperty(ToolsConfigEnum.DEBEZIUM_SQLSERVER_SOURCE.getConfigName());
        if (Strings.isNotBlank(sqlserverSourceParams)) {
            configDeleteParamsMap.put(Debezium.Source.INCREMENTAL_CONFIG_PATH, List.of(sqlserverSourceParams.split(",")));
        }
        String sqlserverSinkParams = System.getProperty(ToolsConfigEnum.DEBEZIUM_SQLSERVER_SINK.getConfigName());
        if (Strings.isNotBlank(sqlserverSinkParams)) {
            configDeleteParamsMap.put(Debezium.Sink.INCREMENTAL_CONFIG_PATH, List.of(sqlserverSinkParams.split(",")));
        }
    }

    /**
     * Creates the empty parameter maps and registers each one under the path of
     * the properties file it will be written to.
     */
    @Override
    public void initConfigChangeParamsMap() {
        sourceMap = new HashMap<>();
        sinkMap = new HashMap<>();
        sourceConnectMap = new HashMap<>();
        sinkConnectMap = new HashMap<>();
        logMap = new HashMap<>();
        this.configPropsChangeParamsMap.put(Debezium.Source.INCREMENTAL_CONFIG_PATH, sourceMap);
        this.configPropsChangeParamsMap.put(Debezium.Sink.INCREMENTAL_CONFIG_PATH, sinkMap);
        this.configPropsChangeParamsMap.put(Debezium.Source.CONNECTOR_PATH, sourceConnectMap);
        this.configPropsChangeParamsMap.put(Debezium.Sink.CONNECTOR_PATH, sinkConnectMap);
        this.configPropsChangeParamsMap.put(LOG_PATTERN_PATH, logMap);
    }

    /**
     * Starts the connect-sink task and blocks in {@link #stop()} until the
     * incremental migration is asked to finish.
     *
     * @param workspaceId workspace id of this migration plan
     * @return always true
     */
    @Override
    public boolean start(String workspaceId) {
        if (PortalControl.status != Status.ERROR) {
            PortalControl.status = Status.START_INCREMENTAL_MIGRATION;
        }
        Hashtable<String, String> hashtable = toolsConfigParametersTable;
        String standaloneSinkFilePath = hashtable.get(Debezium.Sink.CONNECTOR_PATH);
        KafkaUtils.changekafkaLogParam(workspaceId + "_sink", hashtable.get(LOG_PATTERN_PATH));
        // Pick a free rest port for the sink connector from the portal's port range.
        int sinkPort = StartPort.REST_SQLSERVER_SINK + PortalControl.portId * 10;
        int port = ParamsUtils.getAvailablePorts(sinkPort, 1, 1000).get(0);
        PropertitesUtils.changeSinglePropertiesParameter("rest.port", String.valueOf(port), standaloneSinkFilePath);
        Task.startTaskMethod(Method.Name.CONNECT_SINK, 5000, "", incrementalLogFileListener);
        if (PortalControl.status != Status.ERROR) {
            PortalControl.status = Status.RUNNING_INCREMENTAL_MIGRATION;
        }
        stop();
        return true;
    }

    /**
     * Polls until the plan or the incremental migration is asked to stop (or an
     * incremental check is queued), then shuts the connector tasks down.
     *
     * @return always true
     */
    @Override
    public boolean stop() {
        while (!Plan.stopPlan && !Plan.stopIncrementalMigration
                && !PortalControl.taskList.contains(Command.Start.Sqlserver.INCREMENTAL_CHECK)) {
            LOGGER.info("Incremental migration is running...");
            ProcessUtils.sleepThread(1000, "running incremental migraiton");
        }
        LOGGER.info("Plan.stopIncrementalMigration = {} Plan.stopPlan={}  PortalControl.taskList.contains(Command"
                        + ".Start.Sqlserver.INCREMENTAL_CHECK)={}", Plan.stopIncrementalMigration,
                Plan.stopPlan, PortalControl.taskList.contains(Command.Start.Sqlserver.INCREMENTAL_CHECK));
        List<String> taskThreadList = List.of(Method.Run.CONNECT_SINK, Method.Run.CONNECT_SOURCE);
        if (Plan.stopIncrementalMigration) {
            beforeStop(taskThreadList);
        }
        return true;
    }

    /**
     * Checks whether any debezium connector process (forward or reverse,
     * source or sink) is already running.
     *
     * @return true when another connector process exists
     */
    public boolean checkAnotherConnectExists() {
        ArrayList<String> connectorParameterList = new ArrayList<>();
        connectorParameterList.add(Method.Run.REVERSE_CONNECT_SOURCE);
        connectorParameterList.add(Method.Run.CONNECT_SOURCE);
        connectorParameterList.add(Method.Run.CONNECT_SINK);
        connectorParameterList.add(Method.Run.REVERSE_CONNECT_SINK);
        for (String connectorParameter : connectorParameterList) {
            // getCommandPid returns -1 when no process matches the command line.
            if (ProcessUtils.getCommandPid(Task.getTaskProcessMap().get(connectorParameter)) != -1) {
                return true;
            }
        }
        return false;
    }

    /**
     * Removes the installed migration tool files (confluent, connectors and
     * kafka/zookeeper temp directories).
     *
     * @return always true
     */
    public boolean uninstall() {
        Hashtable<String, String> hashtable = toolsConfigParametersTable;
        String errorPath = PortalControl.portalErrorPath;
        ArrayList<String> filePaths = new ArrayList<>();
        filePaths.add(hashtable.get(Debezium.Confluent.PATH));
        filePaths.add(hashtable.get(Debezium.Connector.SQLSERVER_PATH));
        filePaths.add(hashtable.get(Debezium.Connector.OPENGAUSS_PATH));
        filePaths.add(hashtable.get(Debezium.Kafka.TMP_PATH));
        filePaths.add(hashtable.get(Debezium.Zookeeper.TMP_PATH));
        InstallMigrationUtils.removeSingleMigrationToolFiles(filePaths, errorPath);
        return true;
    }

    /**
     * Checks the health of the connect-sink and connect-source processes.
     *
     * @param workspaceId workspace id of this migration plan (unused here)
     * @return always false
     */
    @Override
    public boolean checkStatus(String workspaceId) {
        checkProcess(Method.Name.CONNECT_SINK);
        checkProcess(Method.Name.CONNECT_SOURCE);
        return false;
    }

    /**
     * Locates the forward source/sink progress files in the incremental status
     * folder and, when both exist, merges them into the incremental status file.
     *
     * @param workspaceId workspace id of this migration plan (unused here)
     * @return always true
     */
    @Override
    public boolean reportProgress(String workspaceId) {
        String sourceIncrementalStatusPath = "";
        String sinkIncrementalStatusPath = "";
        File directory = new File(toolsConfigParametersTable.get(Status.INCREMENTAL_FOLDER));
        // List once: a second listFiles() call could race with file creation/deletion.
        File[] statusFiles = directory.isDirectory() ? directory.listFiles() : null;
        if (statusFiles != null) {
            for (File file : statusFiles) {
                if (file.getName().contains("forward-source-process")) {
                    sourceIncrementalStatusPath = file.getAbsolutePath();
                } else if (file.getName().contains("forward-sink-process")) {
                    sinkIncrementalStatusPath = file.getAbsolutePath();
                }
            }
        }
        String incrementalStatusPath = toolsConfigParametersTable.get(Status.INCREMENTAL_PATH);
        if (new File(sourceIncrementalStatusPath).exists() && new File(sinkIncrementalStatusPath).exists()) {
            ChangeStatusTools.changeIncrementalStatus(sourceIncrementalStatusPath, sinkIncrementalStatusPath,
                    incrementalStatusPath, true);
        }
        return true;
    }

    /**
     * Finishes the incremental migration: pauses the plan, prepares reverse
     * migration (detach replica, replication slot) when requested, and stops
     * the connector task threads.
     *
     * @param taskThreadList the task thread names to stop
     */
    public void beforeStop(List<String> taskThreadList) {
        LOGGER.info("beforeStop start...");
        if (PortalControl.status != Status.ERROR) {
            PortalControl.status = Status.INCREMENTAL_MIGRATION_FINISHED;
            Plan.pause = true;
            ProcessUtils.sleepThread(50, "pausing the plan");
        }
        if (PortalControl.taskList.contains(Command.Start.Sqlserver.REVERSE)) {
            if (PortalControl.taskList.contains(Command.Start.Sqlserver.FULL)
                    && SqlserverFullMigrationTool.shouldDetachReplica) {
                fullMigrationTool.runDetach();
            }
            // Reverse migration needs a logical replication slot on openGauss.
            try (PgConnection conn = JdbcUtils.getPgConnection()) {
                JdbcUtils.changeAllTable(conn);
                JdbcUtils.createLogicalReplicationSlot(conn);
            } catch (SQLException e) {
                PortalException portalException = new PortalException("SQL exception", "select global variable",
                        e.getMessage());
                portalException.setRequestInformation("Create slot failed.");
                ReverseMigrationTool.refuseReverseMigrationReason = portalException.getMessage();
                LOGGER.error(portalException.toString());
            }
        }
        for (String taskThread : taskThreadList) {
            Task.stopTaskMethod(taskThread);
        }
        PortalControl.status = Status.INCREMENTAL_MIGRATION_STOPPED;
        LOGGER.info("Incremental migration stopped.");
    }
}
