package com.uh.rds.tester.data;

import com.uh.console.domain.vo.RdsServiceNodesVo;
import com.uh.rds.testing.config.TestConfig;
import com.uh.rds.testing.config.TestConfigManager;
import com.uh.rds.testing.conn.RdsConnectInfo;
import com.uh.console.client.ConsoleWebClient;
import com.uh.console.client.RdsServiceHelper;
import com.uh.rds.testing.utils.RdsConnectionUtils;
import com.uh.rds.testing.validator.DataValidatorArguments;
import com.uh.rds.testing.validator.DataValidatorResult;
import com.uh.rds.testing.validator.DataValidatorRunner;
import org.junit.jupiter.api.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.Map;

import static com.uh.rds.testing.config.TestConfigManager.getConsoleClientConf;
import static com.uh.rds.testing.utils.BatchDataTestUtils.*;
import static com.uh.rds.testing.base.DataValidatorStep.PROCESS;
import static com.uh.rds.testing.base.DataValidatorStep.VALIDATE;

/**
 * Mutual-backup cluster synchronization test.
 *
 * Test environment requirements:
 *  - A console with test login enabled.
 *  - At least one center node.
 *  - Two RDS services created in the console in mutual-backup cluster mode,
 *    each cluster with at least 2 shards (3 masters / 3 slaves recommended).
 *  - Node configuration: backup persistence enabled (Server.DataDump set to 5 in cfg.xml),
 *    and the sync queue count/size increased:
 *         <Sync>
 *             <ListNumbers>6</ListNumbers>
 *             <ListLength>20000</ListLength>
 *         </Sync>
 *
 * Test cases:
 * 1. Generate test data covering string, set, hash, list, zset and stream types.
 * 2. Write to cluster 1, then verify that the data read from the master nodes of
 *    cluster 2 matches what was written.
 *
 * NOTE: before each run, delete the old data files under data/ClusterBackupsSyncTest/
 * (or run the deleteDataFiles() method).
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
public class ClusterBackupsSyncTest {
    // Static final logger bound to the concrete class (SLF4J convention).
    private static final Logger logger = LoggerFactory.getLogger(ClusterBackupsSyncTest.class);

    private ConsoleWebClient client;
    private RdsServiceNodesVo mainServiceNodesVo;
    private RdsServiceNodesVo backupServiceNodesVo;
    private RdsConnectInfo connection;
    private TestConfig conf;
    private DataValidatorRunner tester;

    // Per-data-type CSV fixture files, all rooted under the test's data directory.
    private final File dataBase = new File("data/ClusterBackupsSyncTest");
    private final Map<String, File> dataFiles = Map.of(
            "string", new File(dataBase, "string-data.csv"),
            "set", new File(dataBase, "set-data.csv"),
            "hash", new File(dataBase, "hash-data.csv"),
            "list", new File(dataBase, "list-data.csv"),
            "zset", new File(dataBase, "zset-data.csv"),
            "stream", new File(dataBase, "stream-data.csv")
    );

    /**
     * Initializes the test fixture:
     *  - loads the test configuration into {@code conf};
     *  - looks up the RDS services under test and builds a combined connection;
     *  - generates the data files and configures the validator runner.
     */
    @BeforeAll
    public void init() {
        logger.info("ClusterBackupsSyncTest init ...");
        conf = TestConfigManager.getTestConfig("ClusterBackupsSyncTest");
        client = ConsoleWebClient.getInstance(getConsoleClientConf());
        client.login();

        // Put the master nodes of both mutual-backup clusters into one connection:
        // the main cluster's masters act as masters, the backup cluster's masters
        // act as slaves (validation endpoints).
        // The shard layout of the two clusters must be identical, so the main
        // cluster's shards are used directly as the connection's shards.
        Long mainServiceId = conf.getLong("mainServiceId");
        Long backupServiceId = conf.getLong("backupServiceId");
        mainServiceNodesVo = RdsServiceHelper.getServiceInfo(mainServiceId, client);
        backupServiceNodesVo = RdsServiceHelper.getServiceInfo(backupServiceId, client);
        RdsConnectInfo mainConn = RdsConnectionUtils.getRdsConnection(mainServiceNodesVo);
        RdsConnectInfo backConn = RdsConnectionUtils.getRdsConnection(backupServiceNodesVo);
        connection = new RdsConnectInfo(mainConn.getMode(), mainConn.getPassword());
        mainConn.getShards().forEach(connection::addShard);
        mainConn.getEndpoints().forEach(e -> {
            if (e.isMaster()) {
                connection.addEndpoint(e);
            }
        });
        backConn.getEndpoints().forEach(e -> {
            if (e.isMaster()) {
                // Demote the backup cluster's masters so they are validated as slaves.
                e.setMaster(false);
                connection.addEndpoint(e);
            }
        });

        logger.info("connection: {}", connection);

        // Generate the CSV fixtures; value sizes vary within [valueSize, valueSize + 24].
        int dataCount = conf.getInteger("dataCount");
        int valueSize = conf.getInteger("valueSize");
        int subDataCount = conf.getInteger("subDataCount");
        generateDataFiles(dataFiles, false, dataCount, subDataCount, new int[] {valueSize, valueSize + 24});

        DataValidatorArguments config = new DataValidatorArguments();
        config.setThreadCount(conf.getInteger("threadCount"));
        config.setValidateInterval(conf.getLong("validateInterval"));
        config.setValidateFromSlave(true);
        config.setValidateReversed(true);
        config.setRandomOrder(true);
        tester = new DataValidatorRunner(config);
        addValidators(tester, dataFiles);
    }

    /**
     * Verifies that after high-speed writes of each data type to the master
     * nodes, the slave (backup-cluster) nodes synchronize within the configured
     * time window.
     */
    @Test
    @Order(2)
    public void testDataSynchronized() throws Exception {
        if (!tester.isReady()) {
            tester.prepareThreadsData(connection);
        }

        RdsConnectionUtils.flushDb(connection);
        DataValidatorResult result = tester.runTest(PROCESS, VALIDATE);
        logger.info(result.summary());
        Assertions.assertTrue(result.isPassed());
    }

    /**
     * Deletes the generated data files. Disabled by default; enable manually
     * when stale fixtures must be removed before a fresh run.
     */
    @Test
    @Order(3)
    @Disabled
    public void deleteDataFiles() {
        delDataFiles(dataFiles);
    }

}
