package com.uh.rds.tester.standalone;

import com.uh.rds.testing.config.TestConfig;
import com.uh.rds.testing.config.TestConfigManager;
import com.uh.rds.testing.conn.ConnectionMode;
import com.uh.rds.testing.conn.Endpoint;
import com.uh.rds.testing.conn.RdsConnectInfo;
import com.uh.rds.testing.conn.Shard;
import com.uh.rds.testing.data.FileTask;
import com.uh.rds.testing.data.TestDataGenerator;
import com.uh.rds.testing.utils.RdsConnectionUtils;
import com.uh.rds.testing.validator.DataValidatorArguments;
import com.uh.rds.testing.validator.DataValidatorResult;
import com.uh.rds.testing.validator.DataValidatorRunner;
import com.uh.rds.testing.validator.types.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import static com.uh.rds.testing.base.DataValidatorStep.*;

/**
 * 测试主备节点数据同步：验证同步数据一致性和时效性；
 * 测试两集群间的数据同步：验证同步数据一致性和时效性；
 * 测试环境：
 *  - 具备至少1个中心节点。
 *  - 配置要求，关闭节点的备份存盘（cfg.xml中Server.DataDump设置为0),
 *    加大同步队列大小和数量
 *         <Sync>
 *             <ListNumbers>6</ListNumbers>
 *             <ListLength>20000</ListLength>
 *         </Sync>
 *  - 主备节点数据同步的环境要求：搭建集群模式RDS服务，主备比例为1:1，建议分片数量在2-5之间。
 *  - 两集群间的数据同步的环境要求：搭建两个集群模式的RDS服务，各自主备比例为1:1，建议分片数量在2-5之间。
 * 测试目录文件和配置说明：
 *  - 此测试采用main方法执行，不依赖于JUnit测试框架。
 *  - 测试配置文件运行的当前路径下的 conf/test-config.yml 文件（如果找不到会从classpath: /test-config.yml 加载）。
 *  - 测试数据生成在当前路径下的 data/ClustersDataSyncTest 目录下，在执行时测试数据文件会生成放在此目录下（如果文件不存在就会重新再次生成）
 *    配置文件中配置下面信息：
 *    - id: "ClustersDataSyncTest"
 *     name: "ClustersDataSyncTest"
 *     description: "集群间数据同步测试"
 *     configs:
 *       #是否清空插入数据
 *       clearData: false
 *       # 准备多少批次的数据
 *       batchCount: 20
 *       # 每种数据类型的数据量
 *       dataCount: 10000
 *       # 并发线程数
 *       threadCount: 10
 *       # 验证时间间隔
 *       validateInterval: 10
 *       # 集群的password
 *       # password: "123456"
 *       # 分片信息
 *       shards: [ "0-8191", "8192-16383" ]
 *       # 写入的集群节点
 *       writingEndpoints: ["localhost:6321", "localhost:6323"]
 *       # 读取的集群节点
 *       readingEndpoints: ["localhost:6321", "localhost:6323"]
 *  - 测试结果输出到控制台中和日志文件中，日志文件在当前路径下的 logs 目录下。其中错误日志文件名为 error.log，其他信息输出在system.log文件。
 * 测试运行过程说明：
 *  - 如果测试数据文件不存在就会重新生成并放置在 data/ClustersDataSyncTest/目录下。
 *  - 清空被测的RDS缓存中的所有数据。
 *  - 从文件中加载数据，按类型加载string-data.csv、set-data.csv、hash-data.csv、list-data.csv、zset-data.csv、stream-data.csv；
 *  - 把数据根据分片拆分到不同的Thread中，Thread的数量等于配置中的threadCount数量；
 *  - 同时启动并执行测试线程进行验证，每个线程的执行顺序以伪代码形式给出：
 *     //typesData是每种类型的数据, typesData[0]=string-data, typesData[1]=set-data, typesData[2]=hash-data, typesData[3]=list-data, typesData[4]=zset-data, typesData[5]=stream-data,
 *     for(data : typesData) {
 *         process(data); //处理插入数据
 *         sleep(validateInterval); //等待validateInterval毫秒
 *         validate(data); //验证数据是否和插入的一致
 *         clean(data); //删除测试数据，并验证删除是否全部生效
 *     }
 *  - 测试结束打印测试摘要。println(result.summary())
 */
public class ClustersDataSyncTest {

    // SLF4J loggers are thread-safe; keep one static instance per class.
    private static final Logger logger = LoggerFactory.getLogger(ClustersDataSyncTest.class);

    private TestConfig conf;

    private RdsConnectInfo connection;

    File baseDir = new File("data/ClustersDataSyncTest");
    String stringDataFile = "string-data.csv";
    String setDataFile = "set-data.csv";
    String hashDataFile = "hash-data.csv";
    String listDataFile = "list-data.csv";
    String zsetDataFile = "zset-data.csv";
    String streamDataFile = "stream-data.csv";


    /**
     * Initializes the test environment: loads the test configuration and builds
     * the cluster connection info (shards, writing endpoints, reading endpoints).
     */
    public void init() {
        logger.info("ClustersDataSyncTest init ...");
        conf = TestConfigManager.getTestConfig("ClustersDataSyncTest");

        // Build the connection info from the configuration.
        List<String> shards = conf.getList("shards");
        List<String> writingEndpoints = conf.getList("writingEndpoints");
        List<String> readingEndpoints = conf.getList("readingEndpoints");
        String password = conf.getString("password");

        connection = new RdsConnectInfo(ConnectionMode.CLUSTER, password);
        for (int i = 0; i < shards.size(); i++) {
            connection.addShard(new Shard(i, shards.get(i)));
        }

        addEndpoints(writingEndpoints, true);
        addEndpoints(readingEndpoints, false);

        logger.info("connection: {}", connection);
    }

    /**
     * Parses "host:port" strings and registers them on the connection.
     *
     * @param endpoints list of "host:port" entries from the configuration
     * @param writing   true to register as writing endpoints, false as reading endpoints
     */
    private void addEndpoints(List<String> endpoints, boolean writing) {
        for (int i = 0; i < endpoints.size(); i++) {
            String[] parts = endpoints.get(i).split(":");
            connection.addEndpoint(new Endpoint(parts[0], Integer.parseInt(parts[1]), writing, i));
        }
    }

    /**
     * Returns the six per-type data files of a batch directory, in the fixed
     * order: string, set, hash, list, zset, stream.
     */
    private File[] batchDataFiles(File batchDir) {
        return new File[] {
                new File(batchDir, stringDataFile),
                new File(batchDir, setDataFile),
                new File(batchDir, hashDataFile),
                new File(batchDir, listDataFile),
                new File(batchDir, zsetDataFile),
                new File(batchDir, streamDataFile)
        };
    }


    /**
     * Generates the test data; once generated it is reused on later runs.
     * A batch directory that already exists is skipped entirely.
     */
    public void generateTestData() {
        logger.info("Generating test data ...");
        int dataCount = conf.getInteger("dataCount");
        int batchCount = conf.getInteger("batchCount");
        int valueLengthMin = conf.getInteger("valueLengthMin");
        int valueLengthMax = conf.getInteger("valueLengthMax");
        TestDataGenerator dataGenerator = new TestDataGenerator();

        // Must stay aligned with the file order returned by batchDataFiles().
        String[] dataTypes = {"string", "set", "hash", "list", "zset", "stream"};

        boolean hasDataTask = false;
        for (int i = 0; i < batchCount; i++) {
            File batchDir = new File(baseDir, "batch-" + i);
            if (!batchDir.exists()) { // only generate when the batch directory is missing
                File[] files = batchDataFiles(batchDir);
                for (int t = 0; t < files.length; t++) {
                    dataGenerator.addTask(new FileTask(files[t], dataTypes[t], dataCount, 1,
                            new int[] {valueLengthMin, valueLengthMax}));
                }
                hasDataTask = true;
            }
        }

        if (hasDataTask) {
            dataGenerator.executeTasks();
        } else {
            logger.info("Test data already exists, skip generating ...");
        }
    }


    /**
     * Inserts and validates every data batch against the cluster, printing each
     * batch's summary, then reports the aggregated summary over all batches.
     *
     * @throws Exception if preparing the thread data or running a batch fails
     */
    public void insertData() throws Exception {
        int batchCount = conf.getInteger("batchCount");

        List<DataValidatorResult.ValidatorSummary> validatorSummaryList = new ArrayList<>();
        for (int i = 0; i < batchCount; i++) {
            DataValidatorRunner tester = new DataValidatorRunner();

            File batchDir = new File(baseDir, "batch-" + i);
            File[] files = batchDataFiles(batchDir);
            // Order matches batchDataFiles(): string, set, hash, list, zset, stream.
            tester.addValidator(StringDataValidator.class, files[0]);
            tester.addValidator(SetDataValidator.class, files[1]);
            tester.addValidator(HashDataValidator.class, files[2]);
            tester.addValidator(ListDataValidator.class, files[3]);
            tester.addValidator(ZsetDataValidator.class, files[4]);
            tester.addValidator(StreamDataValidator.class, files[5]);

            DataValidatorArguments config = new DataValidatorArguments();
            config.setValidateFromSlave(true);
            config.setThreadCount(conf.getInteger("threadCount"));
            config.setValidateInterval(conf.getLong("validateInterval"));
            tester.setConfig(config);

            tester.prepareThreadsData(connection);
            logger.info("Tester-{} prepared.", i);

            // When clearData is enabled, the test data is removed (and the
            // removal verified) after validation.
            DataValidatorResult result;
            if (conf.getBoolean("clearData")) {
                result = tester.runTest(PROCESS, VALIDATE, CLEAR);
            } else {
                result = tester.runTest(PROCESS, VALIDATE);
            }
            if (result.isPassed()) {
                System.out.println(result.summary());
                System.out.println("Batch-" + i + " data inserted!");
            } else {
                System.err.println(result.summary());
                // Bug fix: the failure branch previously printed "data inserted!"
                // to stdout, hiding validation failures.
                System.err.println("Batch-" + i + " data insert FAILED!");
            }
            validatorSummaryList.addAll(result.validatorSummaryList());
        }

        // Reports the combined summary of all batches — presumably a static
        // aggregation/print helper; verify against DataValidatorResult.
        DataValidatorResult.validatorSummaryList(validatorSummaryList);
    }

    /**
     * Runs the whole test: initialization, data generation, optional pre-run
     * flush of the RDS cache, and batch insertion/validation.
     *
     * @throws Exception if any step fails
     */
    public void test() throws Exception {
        logger.info("ClustersDataSyncTest test ...");
        init();
        generateTestData();
        // Flush the RDS cache before running when clearData is set.
        // Bug fix: the key used to be "clearDataF", which is not in the
        // documented configuration ("clearData"), so the flush never ran.
        if (conf.getBoolean("clearData")) {
            RdsConnectionUtils.flushDb(connection);
        }
        insertData();
    }


    public static void main(String[] args) throws Exception {
        ClustersDataSyncTest test = new ClustersDataSyncTest();
        test.test();
    }
}
