package com.leon.datalink.core.persistence;

import akka.actor.ActorSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

/**
 * 第五阶段并发和集群测试
 * 
 * 测试DataLink Actor持久化功能在并发和集群环境下的表现：
 * 1. 高并发持久化测试
 * 2. 多线程安全性测试
 * 3. 集群节点间数据一致性测试
 * 4. 分布式锁和同步测试
 * 5. 负载均衡测试
 * 6. 集群故障转移测试
 * 7. 数据分片和复制测试
 * 8. 性能压力测试
 * 
 * @author leon
 */
public class Phase5ConcurrentClusterTest {
    
    private static final Logger logger = LoggerFactory.getLogger(Phase5ConcurrentClusterTest.class);
    
    private PersistenceConfigManager configManager;
    private PersistenceMetricsManager metricsManager;
    private SnapshotStrategyManager snapshotManager;
    private List<ActorSystem> clusterNodes;
    
    // 测试参数
    private static final int CLUSTER_SIZE = 3;
    private static final int CONCURRENT_THREADS = 20;
    private static final int OPERATIONS_PER_THREAD = 500;
    private static final int STRESS_TEST_DURATION_MS = 30000; // 30秒
    
    /**
     * Entry point: runs the full phase-5 concurrent/cluster test suite and
     * exits with status 1 when any stage throws.
     */
    public static void main(String[] args) {
        logger.info("=== DataLink Actor持久化功能第五阶段并发和集群测试 ===");
        try {
            new Phase5ConcurrentClusterTest().runConcurrentClusterTest();
            logger.info("=== 第五阶段并发和集群测试完成 ===");
        } catch (Exception e) {
            // Any failure aborts the JVM with a non-zero status for CI.
            logger.error("第五阶段并发和集群测试失败", e);
            System.exit(1);
        }
    }
    
    /**
     * Runs every phase-5 test stage in order, inside a try/finally so the
     * cluster environment is always torn down, even when a stage fails.
     *
     * @throws Exception propagated from any test stage
     */
    public void runConcurrentClusterTest() throws Exception {
        logger.info("开始第五阶段并发和集群测试...");

        try {
            initializeTestEnvironment();       // 1. set up managers + cluster nodes
            testHighConcurrencyPersistence();  // 2. high-concurrency persistence
            testMultiThreadSafety();           // 3. multi-thread safety
            testClusterDataConsistency();      // 4. cross-node data consistency
            testDistributedLockingAndSync();   // 5. distributed locking and sync
            testLoadBalancing();               // 6. load balancing
            testClusterFailover();             // 7. cluster failover
            testDataShardingAndReplication();  // 8. sharding and replication
            testPerformanceStress();           // 9. sustained stress test
            logger.info("第五阶段并发和集群测试全部通过！");
        } finally {
            // Always release the actor systems regardless of test outcome.
            cleanupTestEnvironment();
        }
    }
    
    /**
     * Prepares the shared persistence managers and boots the local cluster
     * nodes used by all subsequent test stages.
     */
    private void initializeTestEnvironment() {
        logger.info("初始化并发和集群测试环境...");

        configManager = PersistenceConfigManager.getInstance();
        metricsManager = PersistenceMetricsManager.getInstance();
        snapshotManager = SnapshotStrategyManager.getInstance();

        // Enable persistence globally and for the cluster test actor type.
        configManager.setGlobalPersistenceEnabled(true);
        configManager.setActorTypePersistence("ClusterTestActor", true);

        // Start every run from a clean metrics slate.
        metricsManager.clearMetrics();

        // Boot one ActorSystem per simulated cluster node.
        clusterNodes = new ArrayList<>();
        int index = 0;
        while (index < CLUSTER_SIZE) {
            clusterNodes.add(ActorSystem.create("cluster-node-" + index));
            index++;
        }

        logger.info("✓ 并发和集群测试环境初始化完成，集群节点数: {}", CLUSTER_SIZE);
    }
    
    /**
     * 高并发持久化测试 (high-concurrency persistence test).
     *
     * Launches CONCURRENT_THREADS workers that each record
     * OPERATIONS_PER_THREAD persist operations, then validates the total
     * operation count, the error count, and the overall throughput.
     *
     * Fixes: "{:.2f}" is not a valid SLF4J placeholder (SLF4J substitutes
     * only "{}"), so the throughput value was never printed; the latch
     * timeout result was silently ignored; interruption was swallowed
     * without restoring the thread's interrupt flag.
     *
     * @throws Exception if waiting for the workers is interrupted
     */
    private void testHighConcurrencyPersistence() throws Exception {
        logger.info("测试高并发持久化...");

        ExecutorService executor = Executors.newFixedThreadPool(CONCURRENT_THREADS);
        CountDownLatch latch = new CountDownLatch(CONCURRENT_THREADS);
        AtomicLong totalOperations = new AtomicLong(0);
        AtomicInteger errorCount = new AtomicInteger(0);

        long startTime = System.currentTimeMillis();

        // 启动并发任务
        for (int i = 0; i < CONCURRENT_THREADS; i++) {
            final int threadId = i;
            executor.submit(() -> {
                try {
                    String actorType = "ConcurrentActor";
                    String nodeId = "concurrent-node-" + threadId;

                    for (int j = 0; j < OPERATIONS_PER_THREAD; j++) {
                        metricsManager.recordPersistOperation(actorType, nodeId,
                                                             5 + (j % 10),
                                                             512 + (j % 256));
                        totalOperations.incrementAndGet();

                        // 随机延迟模拟真实场景
                        if (j % 50 == 0) {
                            Thread.sleep(1);
                        }
                    }
                } catch (InterruptedException e) {
                    // Preserve the interrupt status instead of swallowing it.
                    Thread.currentThread().interrupt();
                    errorCount.incrementAndGet();
                } catch (Exception e) {
                    logger.error("并发持久化测试异常", e);
                    errorCount.incrementAndGet();
                } finally {
                    latch.countDown();
                }
            });
        }

        // 等待所有任务完成 — fail fast instead of asserting on partial data.
        boolean finished = latch.await(60, TimeUnit.SECONDS);
        executor.shutdown();
        assert finished : "并发任务未在60秒内完成";

        long duration = System.currentTimeMillis() - startTime;
        double throughput = (double) totalOperations.get() * 1000 / duration;

        logger.info("高并发持久化测试结果:");
        logger.info("  总操作数: {}", totalOperations.get());
        logger.info("  错误数: {}", errorCount.get());
        logger.info("  持续时间: {}ms", duration);
        // SLF4J only understands "{}"; pre-format the decimal value.
        logger.info("  吞吐量: {}ops/s", String.format("%.2f", throughput));

        // 验证结果 (widen to long before multiplying to avoid int overflow)
        assert totalOperations.get() == (long) CONCURRENT_THREADS * OPERATIONS_PER_THREAD :
            "操作数不正确: " + totalOperations.get();
        assert errorCount.get() == 0 : "存在错误: " + errorCount.get();
        assert throughput > 1000 : "吞吐量过低: " + throughput;

        logger.info("✓ 高并发持久化测试通过");
    }
    
    /**
     * 多线程安全性测试 (multi-thread safety test).
     *
     * Ten threads concurrently record persist operations against the same
     * actor, then the shared counter and the recorded persist count are
     * checked to agree (1000 each).
     *
     * Fixes: the latch timeout result was ignored, so a hung worker would
     * previously fall through to assertions on partial data; the expected
     * total is now derived from named constants instead of a magic 1000.
     *
     * @throws Exception if waiting for the workers is interrupted
     */
    private void testMultiThreadSafety() throws Exception {
        logger.info("测试多线程安全性...");

        final String actorType = "SafetyTestActor";
        final String nodeId = "safety-test-node";
        final int threadCount = 10;
        final int operationsPerThread = 100;

        ExecutorService executor = Executors.newFixedThreadPool(threadCount);
        CountDownLatch latch = new CountDownLatch(threadCount);
        AtomicInteger sharedCounter = new AtomicInteger(0);

        // 多线程同时操作同一个Actor
        for (int i = 0; i < threadCount; i++) {
            executor.submit(() -> {
                try {
                    for (int j = 0; j < operationsPerThread; j++) {
                        // 模拟并发访问共享资源
                        int currentValue = sharedCounter.incrementAndGet();
                        metricsManager.recordPersistOperation(actorType, nodeId, 5, currentValue);

                        // 验证数据一致性
                        PersistenceMetricsManager.ActorMetrics metrics =
                            metricsManager.getActorMetrics(actorType, nodeId);
                        assert metrics != null : "指标不应该为null";
                    }
                } catch (Exception e) {
                    logger.error("多线程安全性测试异常", e);
                } finally {
                    latch.countDown();
                }
            });
        }

        // Fail fast when the workers do not finish in time.
        boolean finished = latch.await(30, TimeUnit.SECONDS);
        executor.shutdown();
        assert finished : "多线程任务未在30秒内完成";

        // 验证最终状态
        long expectedTotal = (long) threadCount * operationsPerThread;
        PersistenceMetricsManager.ActorMetrics finalMetrics =
            metricsManager.getActorMetrics(actorType, nodeId);
        assert finalMetrics.getTotalPersistCount() == expectedTotal :
            "持久化计数不正确: " + finalMetrics.getTotalPersistCount();
        assert sharedCounter.get() == expectedTotal : "共享计数器不正确: " + sharedCounter.get();

        logger.info("✓ 多线程安全性测试通过");
    }
    
    /**
     * 集群节点间数据一致性测试 (cluster data consistency test).
     *
     * Each of the CLUSTER_SIZE simulated nodes records 200 operations for
     * the same actor type; the per-node metrics must sum to the number of
     * operations the workers reported performing.
     *
     * Fixes: the latch timeout result was ignored, and interruption during
     * the simulated network delay was swallowed without restoring the
     * thread's interrupt flag.
     *
     * @throws Exception if waiting for the workers or the sync pause is interrupted
     */
    private void testClusterDataConsistency() throws Exception {
        logger.info("测试集群节点间数据一致性...");

        String actorType = "ConsistencyTestActor";
        Map<String, Long> nodeOperationCounts = new ConcurrentHashMap<>();

        // 在不同集群节点上执行操作
        ExecutorService executor = Executors.newFixedThreadPool(CLUSTER_SIZE);
        CountDownLatch latch = new CountDownLatch(CLUSTER_SIZE);

        for (int i = 0; i < CLUSTER_SIZE; i++) {
            final int nodeIndex = i;
            executor.submit(() -> {
                try {
                    String nodeId = "cluster-node-" + nodeIndex;
                    long operationCount = 0;

                    for (int j = 0; j < 200; j++) {
                        metricsManager.recordPersistOperation(actorType, nodeId, 10, 1024);
                        operationCount++;

                        // 模拟网络延迟
                        if (j % 20 == 0) {
                            Thread.sleep(5);
                        }
                    }

                    nodeOperationCounts.put(nodeId, operationCount);

                } catch (InterruptedException e) {
                    // Restore the interrupt flag before logging the failure.
                    Thread.currentThread().interrupt();
                    logger.error("集群一致性测试异常", e);
                } catch (Exception e) {
                    logger.error("集群一致性测试异常", e);
                } finally {
                    latch.countDown();
                }
            });
        }

        boolean finished = latch.await(30, TimeUnit.SECONDS);
        executor.shutdown();
        assert finished : "集群一致性任务未在30秒内完成";

        // 等待数据同步
        Thread.sleep(2000);

        // 验证数据一致性: expected total comes from what the workers reported.
        long totalExpectedOperations = nodeOperationCounts.values().stream()
                                                          .mapToLong(Long::longValue)
                                                          .sum();

        long actualTotalOperations = 0;
        for (String nodeId : nodeOperationCounts.keySet()) {
            PersistenceMetricsManager.ActorMetrics metrics =
                metricsManager.getActorMetrics(actorType, nodeId);
            actualTotalOperations += metrics.getTotalPersistCount();
        }

        logger.info("集群数据一致性验证:");
        logger.info("  预期总操作数: {}", totalExpectedOperations);
        logger.info("  实际总操作数: {}", actualTotalOperations);

        assert actualTotalOperations == totalExpectedOperations :
            "集群数据不一致: " + actualTotalOperations + " vs " + totalExpectedOperations;

        logger.info("✓ 集群节点间数据一致性测试通过");
    }
    
    /**
     * 分布式锁和同步测试 (distributed locking and sync test).
     *
     * Five workers contend for a single lock; each performs 50 guarded
     * persist operations, so both the metrics count and the critical-section
     * counter must end at exactly 250.
     *
     * Fixes: the lambda's {@code synchronized (this)} locked the enclosing
     * test instance — an externally reachable monitor — so a dedicated lock
     * object is used instead (same mutual exclusion, safer monitor); the
     * unused {@code threadId} local was removed; the latch timeout result is
     * checked; interruption restores the interrupt flag.
     *
     * @throws Exception if waiting for the workers is interrupted
     */
    private void testDistributedLockingAndSync() throws Exception {
        logger.info("测试分布式锁和同步...");

        String actorType = "LockTestActor";
        String nodeId = "lock-test-node";
        AtomicInteger criticalSectionCounter = new AtomicInteger(0);
        // Private monitor standing in for the distributed lock.
        final Object distributedLock = new Object();

        ExecutorService executor = Executors.newFixedThreadPool(5);
        CountDownLatch latch = new CountDownLatch(5);

        // 模拟需要分布式锁保护的临界区
        for (int i = 0; i < 5; i++) {
            executor.submit(() -> {
                try {
                    for (int j = 0; j < 50; j++) {
                        // 模拟获取分布式锁
                        synchronized (distributedLock) {
                            int currentValue = criticalSectionCounter.incrementAndGet();

                            // 在临界区内执行持久化操作
                            metricsManager.recordPersistOperation(actorType, nodeId, 5, currentValue);

                            // 模拟临界区操作时间 (sleep-under-lock is deliberate here)
                            Thread.sleep(1);
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    logger.error("分布式锁测试异常", e);
                } catch (Exception e) {
                    logger.error("分布式锁测试异常", e);
                } finally {
                    latch.countDown();
                }
            });
        }

        boolean finished = latch.await(30, TimeUnit.SECONDS);
        executor.shutdown();
        assert finished : "分布式锁任务未在30秒内完成";

        // 验证同步结果: 5 threads * 50 guarded operations = 250.
        PersistenceMetricsManager.ActorMetrics metrics =
            metricsManager.getActorMetrics(actorType, nodeId);
        assert metrics.getTotalPersistCount() == 250 :
            "同步操作计数不正确: " + metrics.getTotalPersistCount();
        assert criticalSectionCounter.get() == 250 :
            "临界区计数器不正确: " + criticalSectionCounter.get();

        logger.info("✓ 分布式锁和同步测试通过");
    }
    
    /**
     * 负载均衡测试 (load-balancing test).
     *
     * Ten workers each route 100 operations to randomly chosen nodes; every
     * node's load must stay within 50% of the mean and the total must be
     * exactly 1000.
     *
     * Fixes: each task created its own {@code new Random()} — replaced with
     * {@link ThreadLocalRandom#current()}, the standard idiom for concurrent
     * random number use; the latch timeout result is now checked before the
     * load distribution is analysed.
     *
     * @throws Exception if waiting for the workers is interrupted
     */
    private void testLoadBalancing() throws Exception {
        logger.info("测试负载均衡...");

        String actorType = "LoadBalanceTestActor";
        Map<String, AtomicLong> nodeLoadCounts = new ConcurrentHashMap<>();

        // 初始化节点负载计数器
        for (int i = 0; i < CLUSTER_SIZE; i++) {
            nodeLoadCounts.put("lb-node-" + i, new AtomicLong(0));
        }

        ExecutorService executor = Executors.newFixedThreadPool(10);
        CountDownLatch latch = new CountDownLatch(10);

        // 模拟负载均衡分发
        for (int i = 0; i < 10; i++) {
            executor.submit(() -> {
                try {
                    for (int j = 0; j < 100; j++) {
                        // 随机选择节点（模拟负载均衡器）
                        int nodeIndex = ThreadLocalRandom.current().nextInt(CLUSTER_SIZE);
                        String nodeId = "lb-node-" + nodeIndex;

                        metricsManager.recordPersistOperation(actorType, nodeId, 5, 512);
                        nodeLoadCounts.get(nodeId).incrementAndGet();
                    }
                } catch (Exception e) {
                    logger.error("负载均衡测试异常", e);
                } finally {
                    latch.countDown();
                }
            });
        }

        boolean finished = latch.await(30, TimeUnit.SECONDS);
        executor.shutdown();
        assert finished : "负载均衡任务未在30秒内完成";

        // 分析负载分布
        logger.info("负载分布分析:");
        long totalLoad = 0;
        for (Map.Entry<String, AtomicLong> entry : nodeLoadCounts.entrySet()) {
            long nodeLoad = entry.getValue().get();
            totalLoad += nodeLoad;
            logger.info("  {}: {} 操作", entry.getKey(), nodeLoad);
        }

        // 验证负载均衡效果（允许一定的偏差）
        long averageLoad = totalLoad / CLUSTER_SIZE;
        for (AtomicLong nodeLoad : nodeLoadCounts.values()) {
            long deviation = Math.abs(nodeLoad.get() - averageLoad);
            double deviationPercent = (double) deviation / averageLoad * 100;
            assert deviationPercent < 50 : "负载偏差过大: " + deviationPercent + "%";
        }

        assert totalLoad == 1000 : "总负载不正确: " + totalLoad;

        logger.info("✓ 负载均衡测试通过");
    }
    
    /**
     * 集群故障转移测试 (cluster failover test).
     *
     * Drives 100 operations on a primary node, simulates its failure, fails
     * 50 operations over to a backup node, recovers the primary and runs 30
     * more, then checks each node's recorded totals.
     */
    private void testClusterFailover() throws Exception {
        logger.info("测试集群故障转移...");

        final String actorType = "FailoverTestActor";
        final String primaryNodeId = "primary-node";
        final String backupNodeId = "backup-node";

        // Phase 1: normal traffic on the primary node.
        int sent = 0;
        while (sent < 100) {
            metricsManager.recordPersistOperation(actorType, primaryNodeId, 10, 1024);
            sent++;
        }

        long primaryOperations = metricsManager
            .getActorMetrics(actorType, primaryNodeId)
            .getTotalPersistCount();

        // Phase 2: the primary fails and traffic moves to the backup.
        logger.info("模拟主节点故障...");
        simulateNodeFailure(primaryNodeId);

        logger.info("故障转移到备份节点...");
        sent = 0;
        while (sent < 50) {
            metricsManager.recordPersistOperation(actorType, backupNodeId, 10, 1024);
            sent++;
        }

        // Phase 3: the primary comes back and serves traffic again.
        logger.info("模拟主节点恢复...");
        simulateNodeRecovery(primaryNodeId);

        sent = 0;
        while (sent < 30) {
            metricsManager.recordPersistOperation(actorType, primaryNodeId, 10, 1024);
            sent++;
        }

        // Verify both nodes hold the expected counts after the failover cycle.
        PersistenceMetricsManager.ActorMetrics finalPrimaryMetrics =
            metricsManager.getActorMetrics(actorType, primaryNodeId);
        PersistenceMetricsManager.ActorMetrics backupMetrics =
            metricsManager.getActorMetrics(actorType, backupNodeId);

        assert finalPrimaryMetrics.getTotalPersistCount() == primaryOperations + 30 :
            "主节点操作计数不正确";
        assert backupMetrics.getTotalPersistCount() == 50 :
            "备份节点操作计数不正确";

        logger.info("✓ 集群故障转移测试通过");
    }
    
    /**
     * 数据分片和复制测试 (sharding and replication test).
     *
     * Three shards with two replicas each; every replica records 100
     * operations concurrently, and afterwards all replicas of a shard must
     * report identical persist counts.
     *
     * Fixes: the unused {@code shardId} local in the submission loop was
     * removed, and the latch timeout result is now checked before the
     * replica counts are verified.
     *
     * @throws Exception if waiting for the workers is interrupted
     */
    private void testDataShardingAndReplication() throws Exception {
        logger.info("测试数据分片和复制...");

        String actorType = "ShardingTestActor";
        Map<String, List<String>> shardToNodes = new HashMap<>();

        // 设置分片策略（3个分片，每个分片2个副本）
        for (int i = 0; i < 3; i++) {
            List<String> replicas = Arrays.asList("shard-" + i + "-replica-0", "shard-" + i + "-replica-1");
            shardToNodes.put("shard-" + i, replicas);
        }

        ExecutorService executor = Executors.newFixedThreadPool(6);
        CountDownLatch latch = new CountDownLatch(6);

        // 在每个分片副本上执行操作 (shard id not needed at submission time)
        for (List<String> replicas : shardToNodes.values()) {
            for (String replica : replicas) {
                executor.submit(() -> {
                    try {
                        for (int i = 0; i < 100; i++) {
                            metricsManager.recordPersistOperation(actorType, replica, 5, 512);
                        }
                    } catch (Exception e) {
                        logger.error("分片复制测试异常", e);
                    } finally {
                        latch.countDown();
                    }
                });
            }
        }

        boolean finished = latch.await(30, TimeUnit.SECONDS);
        executor.shutdown();
        assert finished : "分片复制任务未在30秒内完成";

        // 验证分片和复制结果
        for (Map.Entry<String, List<String>> entry : shardToNodes.entrySet()) {
            String shardId = entry.getKey();

            // 验证同一分片的所有副本数据一致
            long expectedCount = 100;
            for (String replica : entry.getValue()) {
                PersistenceMetricsManager.ActorMetrics metrics =
                    metricsManager.getActorMetrics(actorType, replica);
                assert metrics.getTotalPersistCount() == expectedCount :
                    "分片 " + shardId + " 副本 " + replica + " 数据不一致";
            }
        }

        logger.info("✓ 数据分片和复制测试通过");
    }
    
    /**
     * 性能压力测试 (sustained stress test).
     *
     * Runs CONCURRENT_THREADS workers in a tight loop for
     * STRESS_TEST_DURATION_MS, then validates total operations, error rate
     * and throughput.
     *
     * Fixes: "{:.2f}" is not a valid SLF4J placeholder, so the error rate
     * and throughput were never printed; the error rate divided by zero when
     * no operation succeeded; InterruptedException was miscounted as a
     * generic error without restoring the interrupt flag; the ignored
     * awaitTermination result now triggers shutdownNow so the test cannot
     * leave runaway threads behind.
     *
     * @throws Exception if the waiting sleep or termination wait is interrupted
     */
    private void testPerformanceStress() throws Exception {
        logger.info("测试性能压力...");

        String actorType = "StressTestActor";
        AtomicLong totalOperations = new AtomicLong(0);
        AtomicInteger errorCount = new AtomicInteger(0);

        ExecutorService executor = Executors.newFixedThreadPool(CONCURRENT_THREADS);

        long startTime = System.currentTimeMillis();
        long endTime = startTime + STRESS_TEST_DURATION_MS;

        // 启动压力测试任务
        for (int i = 0; i < CONCURRENT_THREADS; i++) {
            final int threadId = i;
            executor.submit(() -> {
                String nodeId = "stress-node-" + threadId;

                while (System.currentTimeMillis() < endTime
                        && !Thread.currentThread().isInterrupted()) {
                    try {
                        metricsManager.recordPersistOperation(actorType, nodeId, 5, 512);
                        totalOperations.incrementAndGet();

                        // 短暂休息避免CPU过载
                        if (totalOperations.get() % 1000 == 0) {
                            Thread.sleep(1);
                        }
                    } catch (InterruptedException e) {
                        // Stop promptly on interruption instead of counting it as an error.
                        Thread.currentThread().interrupt();
                        return;
                    } catch (Exception e) {
                        errorCount.incrementAndGet();
                        if (errorCount.get() % 100 == 0) {
                            logger.warn("压力测试错误计数: {}", errorCount.get());
                        }
                    }
                }
            });
        }

        // 等待测试完成
        Thread.sleep(STRESS_TEST_DURATION_MS + 1000);
        executor.shutdown();
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            // Force-stop stragglers so the test cannot hang the JVM.
            executor.shutdownNow();
        }

        long actualDuration = System.currentTimeMillis() - startTime;
        long operations = totalOperations.get();
        double throughput = (double) operations * 1000 / actualDuration;
        // Guard the division: zero successful operations means a 100% failure.
        double errorRate = operations == 0 ? 100.0
                : (double) errorCount.get() / operations * 100;

        logger.info("性能压力测试结果:");
        logger.info("  测试持续时间: {}ms", actualDuration);
        logger.info("  总操作数: {}", operations);
        logger.info("  错误数: {}", errorCount.get());
        // SLF4J only understands "{}"; decimals are pre-formatted.
        logger.info("  错误率: {}%", String.format("%.2f", errorRate));
        logger.info("  平均吞吐量: {}ops/s", String.format("%.2f", throughput));

        // 验证压力测试结果
        assert operations > 10000 : "操作数过少: " + operations;
        assert errorRate < 1.0 : "错误率过高: " + errorRate + "%";
        assert throughput > 500 : "吞吐量过低: " + throughput;

        logger.info("✓ 性能压力测试通过");
    }
    
    // 辅助方法 (helper methods for the failover scenario)

    /** Simulates a node failure. Placeholder: only logs the event; no ActorSystem is actually stopped. */
    private void simulateNodeFailure(String nodeId) {
        logger.debug("节点 {} 故障", nodeId);
    }
    
    /** Simulates a node recovery. Placeholder: only logs the event; no ActorSystem is actually restarted. */
    private void simulateNodeRecovery(String nodeId) {
        logger.debug("节点 {} 恢复", nodeId);
    }
    
    /**
     * 清理测试环境 (environment teardown).
     *
     * Terminates every cluster ActorSystem started for the run and resets
     * the collected metrics.
     */
    private void cleanupTestEnvironment() {
        logger.info("清理并发和集群测试环境...");

        // Shut down all cluster nodes, then drop the references.
        if (clusterNodes != null) {
            clusterNodes.forEach(ActorSystem::terminate);
            clusterNodes.clear();
        }

        // Leave the metrics store empty for any subsequent test phase.
        if (metricsManager != null) {
            metricsManager.clearMetrics();
        }

        logger.info("✓ 并发和集群测试环境清理完成");
    }
}
