package com.uh.rds.tester.data;

import com.uh.console.client.ConsoleWebClient;
import com.uh.console.client.RdsNodeActions;
import com.uh.console.client.RdsServiceHelper;
import com.uh.console.domain.RdsNode;
import com.uh.console.domain.vo.RdsServiceNodesVo;
import com.uh.rds.testing.config.TestConfig;
import com.uh.rds.testing.config.TestConfigManager;
import com.uh.rds.testing.conn.RdsConnectInfo;
import com.uh.rds.testing.data.FileTask;
import com.uh.rds.testing.data.TestDataGenerator;
import com.uh.rds.testing.utils.RdsConnectionUtils;
import com.uh.rds.testing.validator.DataValidatorArguments;
import com.uh.rds.testing.validator.DataValidatorResult;
import com.uh.rds.testing.validator.DataValidatorRunner;
import com.uh.rds.testing.validator.types.*;
import org.junit.jupiter.api.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;

import static com.uh.rds.testing.config.TestConfigManager.getConsoleClientConf;
import static com.uh.rds.testing.base.DataValidatorStep.*;

/**
 * 测试全量数据同步
 *  测试目的：测试全量同步过程中，还不断的有数据写入缓存，整个过程是否能保证主从节点数据的一致性和完整性。
 *  测试环境：
 *  - 接口登录开启的控制台。
 *  - 具备至少1个中心节点。
 *  - 在控制台中建立1主1从的集群模式RDS服务。
 *  - 配置要求，关闭节点的备份存盘（cfg.xml中Server.DataDump设置为0),
 *    建议配置优化，加大同步队列大小和数量：
 *         <Sync>
 *             <ListNumbers>6</ListNumbers>
 *             <ListLength>20000</ListLength>
 *         </Sync>
 *    建议修改配置，全量同步前清除数据：
 *         <Data>
 *             <ClearDataBeforeFullSynchronization>true</ClearDataBeforeFullSynchronization>
 *         </Data>
 *  测试过程：
 *   准备2个tester(testerBulk, testerString, testerSet, testerZSet, testerList, testerHash, testerStream)，一个用于全量同步测试，其他用于增量数据同步测试。
 *   1. 首先使用 testerBulk 进行全量数据同步测试，testerBulk.runTest(PROCESS, VALIDATE)，主读主写
 *   2. 上一步完成后，同时并发启动 testerString，testerSet, testerZSet, testerList, testerHash, testerStream 进行增量数据同步测试， runTest(PROCESS, VALIDATE)，主写从读。这一步要设置时间间隔足够长，要其能够从全量同步开始前持续到全量同步结束后一段时间。
 *   3. testerBulk设置为 validateFromSlave=true, validateReversed=false, 然后再在从节点上执行 runTest(VALIDATE)，验证从节点数据是否和主节点一致。
 *
 */
@Nested
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
public class MasterSlaveBulkTest {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    private ConsoleWebClient client;
    private RdsServiceNodesVo serviceNodesVo;
    private RdsConnectInfo connection;
    private TestConfig conf;
    // Bulk tester: drives the full-synchronization data set.
    private DataValidatorRunner tester;
    // Incremental tester: keeps writing data while the full sync is in progress.
    private DataValidatorRunner additionTester;

    final File dataBase = new File("data/MasterSlaveBulkTest");
    final File stringDataFile = new File(dataBase, "string-data.csv");
    final File setDataFile = new File(dataBase, "set-data.csv");
    final File hashDataFile = new File(dataBase, "hash-data.csv");
    final File listDataFile = new File(dataBase, "list-data.csv");
    final File zsetDataFile = new File(dataBase, "zset-data.csv");
    final File streamDataFile = new File(dataBase, "stream-data.csv");

    final File addStringDataFile = new File(dataBase, "add-string-data.csv");
    final File addSetDataFile = new File(dataBase, "add-set-data.csv");
    final File addHashDataFile = new File(dataBase, "add-hash-data.csv");
    final File addListDataFile = new File(dataBase, "add-list-data.csv");
    final File addZsetDataFile = new File(dataBase, "add-zset-data.csv");
    final File addStreamDataFile = new File(dataBase, "add-stream-data.csv");

    /**
     * Initializes the test fixtures:
     *  - loads the test configuration into {@code conf};
     *  - logs in to the console and resolves the RDS service under test;
     *  - builds the bulk tester and the incremental tester with one
     *    validator per data type (string/set/hash/list/zset/stream).
     */
    @BeforeAll
    public void init() {
        logger.info("MasterSlaveBulkTest init ...");
        conf = TestConfigManager.getTestConfig("MasterSlaveBulkTest");
        client = ConsoleWebClient.getInstance(getConsoleClientConf());
        client.login();

        Long testServiceId = conf.getLong("testServiceId");
        serviceNodesVo = RdsServiceHelper.getServiceInfo(testServiceId, client);
        connection = RdsConnectionUtils.getRdsConnection(serviceNodesVo);

        DataValidatorArguments config = new DataValidatorArguments();
        config.setThreadCount(conf.getInteger("threadCount"));

        tester = new DataValidatorRunner(config);
        tester.addValidator(StringDataValidator.class, stringDataFile);
        tester.addValidator(SetDataValidator.class, setDataFile);
        tester.addValidator(HashDataValidator.class, hashDataFile);
        tester.addValidator(ListDataValidator.class, listDataFile);
        tester.addValidator(ZsetDataValidator.class, zsetDataFile);
        tester.addValidator(StreamDataValidator.class, streamDataFile);

        DataValidatorArguments additionConfig = new DataValidatorArguments();
        additionConfig.setThreadCount(conf.getInteger("additionThreadCount"));
        additionConfig.setRandomOrder(true);
        additionConfig.setValidateFromSlave(true);
        additionTester = new DataValidatorRunner(additionConfig);
        additionTester.addValidator(StringDataValidator.class, addStringDataFile);
        additionTester.addValidator(SetDataValidator.class, addSetDataFile);
        additionTester.addValidator(HashDataValidator.class, addHashDataFile);
        additionTester.addValidator(ListDataValidator.class, addListDataFile);
        additionTester.addValidator(ZsetDataValidator.class, addZsetDataFile);
        additionTester.addValidator(StreamDataValidator.class, addStreamDataFile);
    }

    /**
     * Finds the slave node of the service under test.
     *
     * @return the first node whose master flag is false
     * @throws IllegalStateException if the service has no slave node, so a
     *         misconfigured environment fails fast instead of throwing an
     *         NPE later on {@code getNodeId()}
     */
    private RdsNode findSlaveNode() {
        return serviceNodesVo.getNodes().stream()
                .filter(node -> !node.getMasterNode())
                .findFirst()
                .orElseThrow(() -> new IllegalStateException(
                        "No slave node found in the RDS service under test"));
    }

    /**
     * Generates the test data files. Files are reusable across runs: each
     * task is only scheduled when its target file does not exist yet.
     */
    @Test
    @Order(1)
    public void generateTestData() {
        TestDataGenerator dataGenerator = new TestDataGenerator();

        int dataCount = conf.getInteger("dataCount");
        int subDataCount = conf.getInteger("subDataCount");

        // Bulk data set; "string" entries have no sub-elements, hence count 1.
        dataGenerator.addNoExistTask(new FileTask(stringDataFile, "string", dataCount, 1));
        dataGenerator.addNoExistTask(new FileTask(setDataFile, "set", dataCount, subDataCount));
        dataGenerator.addNoExistTask(new FileTask(hashDataFile, "hash", dataCount, subDataCount));
        dataGenerator.addNoExistTask(new FileTask(listDataFile, "list", dataCount, subDataCount));
        dataGenerator.addNoExistTask(new FileTask(zsetDataFile, "zset", dataCount, subDataCount));
        dataGenerator.addNoExistTask(new FileTask(streamDataFile, "stream", dataCount, subDataCount));

        // Incremental data set, written concurrently with the full sync.
        int additionDataCount = conf.getInteger("additionDataCount");
        int additionSubDataCount = conf.getInteger("additionSubDataCount");
        dataGenerator.addNoExistTask(new FileTask(addStringDataFile, "string", additionDataCount, 1));
        dataGenerator.addNoExistTask(new FileTask(addSetDataFile, "set", additionDataCount, additionSubDataCount));
        dataGenerator.addNoExistTask(new FileTask(addHashDataFile, "hash", additionDataCount, additionSubDataCount));
        dataGenerator.addNoExistTask(new FileTask(addListDataFile, "list", additionDataCount, additionSubDataCount));
        dataGenerator.addNoExistTask(new FileTask(addZsetDataFile, "zset", additionDataCount, additionSubDataCount));
        dataGenerator.addNoExistTask(new FileTask(addStreamDataFile, "stream", additionDataCount, additionSubDataCount));

        int taskCount = dataGenerator.getTaskCount();
        if (taskCount > 0) {
            logger.info("Generating test data ...");
            int executed = dataGenerator.executeTasks();
            Assertions.assertEquals(taskCount, executed);
        }
        else {
            logger.info("Test data files already exists, skip generating ...");
        }
    }

    /**
     * Bulk write phase:
     *  - flushes the database (master and slave together);
     *  - stops the slave node so it will need a full sync later;
     *  - writes the bulk data set to the master and validates it there.
     *
     * @throws Exception if the node action or test run fails
     */
    @Test
    @Order(2)
    public void testBulkDataWrite() throws Exception {
        if (!tester.isReady()) {
            tester.prepareThreadsData(connection);
        }

        RdsConnectionUtils.flushDb(connection); // flush master and slave together

        RdsNodeActions nodeAct = client.getRdsNodeActions();
        nodeAct.stopNodeWait(findSlaveNode().getNodeId()); // stop the slave node

        // Validate through the master node (the slave is down).
        tester.getConfig().setValidateFromSlave(false);

        DataValidatorResult result = tester.runTest(PROCESS, VALIDATE);
        System.out.println(result.summary());
        Assertions.assertTrue(result.isPassed());
    }

    /**
     * Keeps writing incremental data while the full synchronization runs:
     *  - starts the incremental writers asynchronously;
     *  - restarts the slave node, which triggers the full sync;
     *  - waits for the writers, then validates the incremental data
     *    (validateFromSlave=true was set in {@link #init()}).
     *
     * @throws Exception if a node action, sleep or test run fails
     */
    @Test
    @Order(3)
    public void testAdditionData() throws Exception {
        if (!additionTester.isReady()) {
            additionTester.prepareThreadsData(connection);
        }

        // Start the incremental write test asynchronously.
        additionTester.startTest(PROCESS);

        // Start the slave node; it will begin a full sync from the master.
        RdsNodeActions nodeAct = client.getRdsNodeActions();
        nodeAct.startNodeWait(findSlaveNode().getNodeId());

        // Wait until the incremental writes complete.
        // Print the summary before asserting so diagnostics survive a failure.
        DataValidatorResult processResult = additionTester.waitTestResult();
        System.out.println(processResult.summary());
        Assertions.assertTrue(processResult.isPassed());

        // Give replication time to catch up before validating on the slave.
        int waitSeconds = conf.getInteger("additionValidateWait");
        logger.info("Waiting {} seconds for addition data to be validated...", waitSeconds);
        Thread.sleep(waitSeconds * 1000L);

        // Verify the incremental data is complete and consistent on the slave.
        logger.info("Validating addition data in slave node...");
        DataValidatorResult validateResult = additionTester.runTest(VALIDATE);
        System.out.println(validateResult.summary());
        Assertions.assertTrue(validateResult.isPassed());
    }

    /**
     * Validates the bulk data on the slave node:
     *  - sets validateFromSlave=true;
     *  - runs runTest(VALIDATE) and asserts the slave matches the master.
     *
     * @throws Exception if the test run fails
     */
    @Test
    @Order(4)
    public void validateBulkInSlave() throws Exception {
        if (!tester.isReady()) {
            tester.prepareThreadsData(connection);
        }

        // Validate against the slave node.
        tester.getConfig().setValidateFromSlave(true);

        DataValidatorResult result = tester.runTest(VALIDATE);
        System.out.println(result.summary());
        Assertions.assertTrue(result.isPassed());
    }

    /**
     * Cleans up the incremental data via {@code additionTester.runTest(CLEAR)}.
     * Ordered after {@link #validateBulkInSlave()} — the two previously shared
     * {@code @Order(4)}, which left their relative order unspecified; cleanup
     * must not race the slave-side validation.
     */
    @Test
    @Order(5)
    public void clearAdditionalData() {
        if (!additionTester.isReady()) {
            additionTester.prepareThreadsData(connection);
        }
        // Remove the incremental data set.
        DataValidatorResult result = additionTester.runTest(CLEAR);
        System.out.println(result.summary());
        logger.info("Additional data cleared.");
    }

}
