package com.uh.rds.tester.data;

import com.uh.console.domain.RdsNode;
import com.uh.console.domain.vo.RdsServiceNodesVo;
import com.uh.rds.testing.config.TestConfig;
import com.uh.rds.testing.conn.RdsConnectInfo;
import com.uh.console.client.ConsoleWebClient;
import com.uh.rds.testing.utils.RdsConnectionUtils;
import com.uh.rds.testing.validator.DataValidatorArguments;
import com.uh.rds.testing.validator.DataValidatorResult;
import com.uh.rds.testing.validator.DataValidatorRunner;
import org.junit.jupiter.api.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import static com.uh.rds.testing.config.TestConfigManager.getConsoleClientConf;
import static com.uh.rds.testing.config.TestConfigManager.getTestConfig;
import static com.uh.console.client.RdsServiceHelper.deleteLastShard;
import static com.uh.console.client.RdsServiceHelper.getServiceInfo;
import static com.uh.rds.testing.utils.BatchDataTestUtils.addValidators;
import static com.uh.rds.testing.utils.BatchDataTestUtils.generateDataFiles;
import static com.uh.rds.testing.base.DataValidatorStep.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;


/**
 * 集群模式下集群分片变更测试
 *  测试目的：测试在集群模式下，集群分片变更后数据是否能够和变更前保持一致。
 *  测试环境：
 *  - 接口登录开启的控制台。
 *  - 具备至少1个中心节点。
 *  - 在控制台中建立4主4从的集群模式RDS服务。
 *  - 建议主节点和从节点分别放置在不同的主机上，这样可验证跨网络的主从数据同步。
 *  - 配置要求，节点的备份存盘开启（cfg.xml中Server.DataDump设置为5），
 *    建议配置优化，加大同步队列大小和数量：
 *         <Sync>
 *             <ListNumbers>6</ListNumbers>
 *             <ListLength>20000</ListLength>
 *         </Sync>
 *  测试过程：
 *    1. 生成测试数据，数据类型包括 string, set, hash, list, zset, stream
 *    2. 在集群初始状态下（集群分片和节点完整运行），测试数据是否能够正常写入，并在从节点衍生数据是否完整一致。
 *    3. 删除指定数量的分片，把删除的分片再添加回去。
 *    4. 集群分片变更后，验证数据是否和之前一样。
 *
 */
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
public class ClusterChangeTest {

    /** Fixed wait (ms) for an updated cluster configuration to take effect before restart. */
    private static final long CONFIG_APPLY_WAIT_MS = 18_000L;

    private final Logger logger = LoggerFactory.getLogger(getClass());

    private ConsoleWebClient client;
    private TestConfig conf;
    private RdsServiceNodesVo serviceNodesVo;
    private RdsConnectInfo connection;
    private DataValidatorRunner tester;

    /** Base directory holding all generated test data files for this test. */
    private final File dataBase = new File("data/ClusterChangeTest");

    /** One generated CSV data file per Redis data type under test. */
    private final Map<String, File> dataFiles = Map.of(
            "string", new File(dataBase, "string-data.csv"),
            "set", new File(dataBase, "set-data.csv"),
            "hash", new File(dataBase, "hash-data.csv"),
            "list", new File(dataBase, "list-data.csv"),
            "zset", new File(dataBase, "zset-data.csv"),
            "stream", new File(dataBase, "stream-data.csv"));

    /**
     * Logs into the console, resolves the service topology for the configured
     * service id, generates the test data files and builds the validator runner.
     */
    @BeforeAll
    public void init() {
        conf = getTestConfig("ClusterChangeTest");
        Long serviceId = conf.getLong("testServiceId");

        client = ConsoleWebClient.getInstance(getConsoleClientConf());
        client.login();

        serviceNodesVo = getServiceInfo(serviceId, client);
        connection = RdsConnectionUtils.getRdsConnection(serviceNodesVo);

        int dataCount = conf.getInteger("dataCount");
        int subDataCount = conf.getInteger("subDataCount");
        generateDataFiles(dataFiles, false, dataCount, subDataCount, new int[] {512, 1024});

        DataValidatorArguments config = new DataValidatorArguments();
        config.setThreadCount(conf.getInteger("threadCount"));
        config.setValidateFromSlave(false);
        config.setValidateReversed(true);
        config.setValidateInterval(conf.getLong("validateInterval"));
        tester = new DataValidatorRunner(config);
        addValidators(tester, dataFiles);
    }

    /**
     * With the cluster in its initial state (no shard changes yet), verifies that
     * the test data can be written and that the derived data on the slave nodes is
     * complete and consistent.
     */
    @Test
    @Order(1)
    public void dataInitialTest() throws Exception {
        if (!tester.isReady()) {
            tester.prepareThreadsData(connection);
        }
        RdsConnectionUtils.flushDb(connection); // start from an empty database
        DataValidatorResult result = tester.runTest(PROCESS, VALIDATE);
        logger.info("{}", result.summary());
        assertTrue(result.isPassed());
    }

    /**
     * Cluster shard change operation: deletes the configured number of shards,
     * waits for the change to settle, then adds the deleted shards back and
     * restarts the service.
     */
    @Test
    @Order(2)
    public void shardsChange() throws Exception {
        Long testServiceId = conf.getLong("testServiceId");
        // Multiply as long to avoid int overflow for large waitSeconds values.
        long waitTime = conf.getInteger("waitSeconds") * 1000L;
        int deleteShards = conf.getInteger("deleteShards");

        // Step 1: delete the last shard(s) from the RDS service.
        RdsServiceNodesVo servNodesVo = client.getRdsServiceActions().getServiceNodes(testServiceId);
        List<RdsNode> deletedNodes = deleteLastShard(servNodesVo, deleteShards);
        Set<Long> deletedNodeIds = deletedNodes.stream().map(RdsNode::getNodeId).collect(Collectors.toSet());
        client.getRdsServiceActions().updateService(servNodesVo); // push the new cluster config

        logger.info("Shards were deleted! Wait {}s ...", waitTime / 1000);

        // Step 2: wait for the cluster change to complete.
        sleepMillis(waitTime);

        // Step 3: add the shard(s) back. A null nodeId marks a node as newly added;
        // the remaining nodes only get their slot assignment updated.
        for (RdsNode node : serviceNodesVo.getNodes()) {
            if (deletedNodeIds.contains(node.getNodeId())) {
                logger.info("Node {} was deleted and re-added!", node.getNodeName());
                node.setNodeId(null);
                node.setNodeStatus("none");
            } else {
                logger.info("Node {} was remained!", node.getNodeName());
                node.getChangedProps().add("slot"); // update the slot assignment
            }
        }

        client.getRdsServiceActions().updateService(serviceNodesVo); // push the restored config
        sleepMillis(CONFIG_APPLY_WAIT_MS); // let the configuration take effect
        client.getRdsServiceActions().startServiceWait(testServiceId); // start the cluster

        // Step 4: wait for the cluster change to complete.
        sleepMillis(waitTime);
    }

    /**
     * After the shard changes, verifies that the data is identical to the data
     * written before the change.
     */
    @Test
    @Order(3)
    public void testAfterChanges() throws Exception {
        if (!tester.isReady()) {
            tester.prepareThreadsData(connection);
        }

        DataValidatorResult result = tester.runTest(VALIDATE);
        logger.info("{}", result.summary());
        assertTrue(result.isPassed());
    }

    /**
     * Sleeps for the given number of milliseconds. If interrupted, the thread's
     * interrupt status is restored before the failure is propagated, so callers
     * further up the stack can still observe the interrupt.
     */
    private static void sleepMillis(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // never swallow the interrupt flag
            throw new RuntimeException(e);
        }
    }
}
