//! # 并发和性能测试
//!
//! 提供消息总线系统的并发安全性和性能测试

#[cfg(test)]
mod concurrent_performance_tests {
    use crate::prelude::*;
    use crate::mock::{MockMessageBus, MockBehavior, test_helpers::*};
    use crate::publisher::{SimplePublisher, BatchPublisher};
    use serde_json::json;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use tokio::time::{Duration, Instant};

    /// 并发安全性测试
    mod concurrency_tests {
        use super::*;

        #[tokio::test]
        async fn test_concurrent_publish_subscribe() {
            let bus = Arc::new(MockMessageBus::new());
            let published_count = Arc::new(AtomicUsize::new(0));
            let subscribed_count = Arc::new(AtomicUsize::new(0));
            
            let mut handles = vec![];
            
            // 并发发布消息
            for i in 0..50 {
                let bus_clone = bus.clone();
                let counter = published_count.clone();
                let handle = tokio::spawn(async move {
                    let message = create_test_message("concurrent.test", json!({"id": i}));
                    if bus_clone.publish(message).await.is_ok() {
                        counter.fetch_add(1, Ordering::Relaxed);
                    }
                });
                handles.push(handle);
            }
            
            // 并发创建订阅者
            for i in 0..10 {
                let bus_clone = bus.clone();
                let counter = subscribed_count.clone();
                let handle = tokio::spawn(async move {
                    if bus_clone.subscribe(&format!("concurrent.test.{}", i)).await.is_ok() {
                        counter.fetch_add(1, Ordering::Relaxed);
                    }
                });
                handles.push(handle);
            }
            
            for handle in handles {
                handle.await.unwrap();
            }
            
            assert_eq!(published_count.load(Ordering::Relaxed), 50);
            assert_eq!(subscribed_count.load(Ordering::Relaxed), 10);
        }

        #[tokio::test]
        async fn test_high_frequency_operations() {
            let bus = Arc::new(MockMessageBus::new());
            let operation_count = 1000;
            let concurrent_tasks = 20;
            let operations_per_task = operation_count / concurrent_tasks;
            
            let success_count = Arc::new(AtomicUsize::new(0));
            let mut handles = vec![];
            
            for task_id in 0..concurrent_tasks {
                let bus_clone = bus.clone();
                let counter = success_count.clone();
                
                let handle = tokio::spawn(async move {
                    let mut local_success = 0;
                    
                    for i in 0..operations_per_task {
                        let message = create_test_message(
                            "high.frequency",
                            json!({"task": task_id, "op": i})
                        );
                        
                        if bus_clone.publish(message).await.is_ok() {
                            local_success += 1;
                        }
                    }
                    
                    counter.fetch_add(local_success, Ordering::Relaxed);
                });
                
                handles.push(handle);
            }
            
            for handle in handles {
                handle.await.unwrap();
            }
            
            assert_eq!(success_count.load(Ordering::Relaxed), operation_count);
        }

        #[tokio::test]
        async fn test_mixed_operations_concurrency() {
            let bus = Arc::new(MockMessageBus::new());
            let mut handles = vec![];
            
            // 发布任务
            for i in 0..20 {
                let bus_clone = bus.clone();
                let handle = tokio::spawn(async move {
                    let message = create_test_message("mixed.ops", json!({"pub": i}));
                    bus_clone.publish(message).await
                });
                handles.push(handle);
            }
            
            // 订阅任务
            for i in 0..10 {
                let bus_clone = bus.clone();
                let handle = tokio::spawn(async move {
                    bus_clone.subscribe(&format!("mixed.ops.{}", i)).await
                });
                handles.push(handle);
            }
            
            // 统计任务
            for _ in 0..5 {
                let bus_clone = bus.clone();
                let handle = tokio::spawn(async move {
                    bus_clone.stats().await
                });
                handles.push(handle);
            }
            
            // 等待所有任务完成
            let results: Vec<_> = futures::future::join_all(handles).await;
            let success_count = results.iter().filter(|r| r.is_ok()).count();
            
            // 大部分操作应该成功
            assert!(success_count >= 30);
        }
    }

    /// 性能基准测试
    mod performance_tests {
        use super::*;

        #[tokio::test]
        async fn test_publish_throughput() {
            // Measure sequential publish throughput over 10k messages.
            let bus = MockMessageBus::new();
            let message_count = 10000;

            let started_at = Instant::now();

            for seq in 0..message_count {
                let msg = create_test_message("throughput.test", json!({"id": seq}));
                bus.publish(msg).await.unwrap();
            }

            let elapsed = started_at.elapsed();
            let throughput = message_count as f64 / elapsed.as_secs_f64();

            println!("发布吞吐量: {:.2} 消息/秒", throughput);

            // Every message must have reached the bus.
            let published = bus.get_published_messages().await;
            assert_eq!(published.len(), message_count);

            // 性能应该足够高（这里设置一个较低的阈值）
            assert!(throughput > 1000.0, "吞吐量太低: {:.2}", throughput);
        }

        #[tokio::test]
        async fn test_subscribe_performance() {
            // Time how quickly 1000 subscriptions can be established.
            let bus = MockMessageBus::new();
            let subscriber_count = 1000;

            let started_at = Instant::now();

            let mut subscribers = Vec::with_capacity(subscriber_count);
            for idx in 0..subscriber_count {
                let sub = bus.subscribe(&format!("perf.test.{}", idx)).await.unwrap();
                subscribers.push(sub);
            }

            let elapsed = started_at.elapsed();
            let rate = subscriber_count as f64 / elapsed.as_secs_f64();

            println!("订阅创建速率: {:.2} 订阅/秒", rate);

            // All subscriptions must have been created.
            assert_eq!(subscribers.len(), subscriber_count);
        }

        #[tokio::test]
        async fn test_batch_processing_performance() {
            // Push 5k messages through a batching publisher and verify that a
            // final flush delivers every one of them to the underlying bus.
            let bus = Arc::new(MockMessageBus::new());
            let batch_publisher = BatchPublisher::new(
                bus.clone(),
                "batch_perf".to_string(),
                100,  // batch size
                1000, // timeout
            );

            let message_count = 5000;
            let started_at = Instant::now();

            for seq in 0..message_count {
                let msg = create_test_message("batch.perf", json!({"id": seq}));
                batch_publisher.publish(msg).await.unwrap();
            }

            // Drain any partially filled batch.
            batch_publisher.flush().await.unwrap();

            let elapsed = started_at.elapsed();
            let throughput = message_count as f64 / elapsed.as_secs_f64();

            println!("批处理吞吐量: {:.2} 消息/秒", throughput);

            let published = bus.get_published_messages().await;
            assert_eq!(published.len(), message_count);
        }

        #[tokio::test]
        async fn test_memory_usage_under_load() {
            // Hold 100 live subscriptions while publishing 1000 ~1KB messages,
            // then confirm the bus statistics still add up.
            let bus = Arc::new(MockMessageBus::new());

            // Register a large pool of subscribers, one topic each.
            let mut subscribers = Vec::new();
            for topic_id in 0..100 {
                let sub = bus.subscribe(&format!("memory.test.{}", topic_id)).await.unwrap();
                subscribers.push(sub);
            }

            // Flood the bus with sizeable payloads.
            for seq in 0..1000 {
                let payload = json!({
                    "id": seq,
                    "data": "x".repeat(1024), // 1KB of filler
                    "timestamp": chrono::Utc::now().to_rfc3339()
                });

                let msg = create_test_message("memory.test.1", payload);
                bus.publish(msg).await.unwrap();
            }

            let stats = bus.stats().await.unwrap();
            println!("内存使用统计: {:?}", stats);

            // The system must still be fully functional under this load.
            assert_eq!(stats.total_published, 1000);
            assert_eq!(stats.active_subscribers, 100);
        }
    }

    /// 压力测试
    mod stress_tests {
        use super::*;

        #[tokio::test]
        async fn test_sustained_load() {
            // Keep 10 workers publishing for a fixed window and verify that
            // every message they report as sent is visible on the bus.
            let bus = Arc::new(MockMessageBus::new());
            let test_window = Duration::from_secs(5); // 5-second stress window
            let started_at = Instant::now();

            let message_count = Arc::new(AtomicUsize::new(0));

            // Launch the concurrent publishers. Instant/Duration are Copy, so
            // each task captures its own copy of the shared deadline.
            let workers: Vec<_> = (0..10)
                .map(|worker_id| {
                    let bus_ref = bus.clone();
                    let counter = message_count.clone();
                    tokio::spawn(async move {
                        let mut sent = 0;

                        while started_at.elapsed() < test_window {
                            let msg = create_test_message(
                                "stress.test",
                                json!({"worker": worker_id, "count": sent}),
                            );

                            if bus_ref.publish(msg).await.is_ok() {
                                sent += 1;
                            }

                            // Brief pause to mimic a realistic workload.
                            tokio::time::sleep(Duration::from_millis(1)).await;
                        }

                        counter.fetch_add(sent, Ordering::Relaxed);
                    })
                })
                .collect();

            for worker in workers {
                worker.await.unwrap();
            }

            let total_messages = message_count.load(Ordering::Relaxed);
            let rate = total_messages as f64 / test_window.as_secs_f64();

            println!("持续负载测试: 发送了 {} 条消息，速率: {:.2} 消息/秒", total_messages, rate);

            // The bus must have processed at least something…
            assert!(total_messages > 0);

            // …and recorded exactly what the workers counted as sent.
            let published = bus.get_published_messages().await;
            assert_eq!(published.len(), total_messages);
        }

        #[tokio::test]
        async fn test_burst_traffic() {
            let bus = Arc::new(MockMessageBus::new());
            let burst_size = 1000;
            let burst_count = 5;
            
            for burst in 0..burst_count {
                let start = Instant::now();
                let mut handles = vec![];
                
                // 突发流量
                for i in 0..burst_size {
                    let bus_clone = bus.clone();
                    let handle = tokio::spawn(async move {
                        let message = create_test_message(
                            "burst.test",
                            json!({"burst": burst, "msg": i})
                        );
                        bus_clone.publish(message).await
                    });
                    handles.push(handle);
                }
                
                // 等待这一批完成
                let results: Vec<_> = futures::future::join_all(handles).await;
                let success_count = results.iter().filter(|r| r.is_ok()).count();
                
                let duration = start.elapsed();
                println!("突发 {}: {} 条消息，用时 {:?}", burst + 1, success_count, duration);
                
                assert_eq!(success_count, burst_size);
                
                // 突发之间的间隔
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
            
            let published = bus.get_published_messages().await;
            assert_eq!(published.len(), burst_size * burst_count);
        }

        #[tokio::test]
        async fn test_resource_exhaustion_recovery() {
            // Drive the bus through a healthy phase, an induced-failure phase,
            // and a recovery phase; only healthy-phase messages should persist.
            let bus = Arc::new(MockMessageBus::new());

            // Phase 1: normal operation — every publish succeeds.
            for seq in 0..100 {
                let msg = create_test_message("recovery.test", json!({"phase": 1, "id": seq}));
                bus.publish(msg).await.unwrap();
            }

            // Phase 2: simulate resource exhaustion — every publish must fail.
            bus.set_behavior("publish", MockBehavior::new().with_failure("资源耗尽")).await;

            for seq in 0..50 {
                let msg = create_test_message("recovery.test", json!({"phase": 2, "id": seq}));
                assert!(bus.publish(msg).await.is_err());
            }

            // Phase 3: restore default behavior — publishes succeed again.
            bus.set_behavior("publish", MockBehavior::new()).await;

            for seq in 0..100 {
                let msg = create_test_message("recovery.test", json!({"phase": 3, "id": seq}));
                bus.publish(msg).await.unwrap();
            }

            // Only phase 1 and phase 3 messages (100 each) should be recorded.
            let published = bus.get_published_messages().await;
            assert_eq!(published.len(), 200);
        }
    }

    /// 延迟和响应时间测试
    mod latency_tests {
        use super::*;

        #[tokio::test]
        async fn test_publish_latency() {
            // Record per-publish latency over 1000 messages and check that the
            // tail latency stays within bounds.
            let bus = MockMessageBus::new();
            let test_count = 1000;

            let mut latencies = Vec::with_capacity(test_count);
            for seq in 0..test_count {
                let msg = create_test_message("latency.test", json!({"id": seq}));

                let started_at = Instant::now();
                bus.publish(msg).await.unwrap();
                latencies.push(started_at.elapsed());
            }

            // Percentiles from the sorted samples (unstable sort is fine:
            // equal Durations are indistinguishable).
            latencies.sort_unstable();
            let p50 = latencies[test_count / 2];
            let p95 = latencies[test_count * 95 / 100];
            let p99 = latencies[test_count * 99 / 100];

            println!("发布延迟统计:");
            println!("  P50: {:?}", p50);
            println!("  P95: {:?}", p95);
            println!("  P99: {:?}", p99);

            // Tail latency must stay within a generous bound.
            assert!(p99 < Duration::from_millis(100), "P99延迟过高: {:?}", p99);
        }

        #[tokio::test]
        async fn test_subscribe_latency() {
            // Measure subscription-establishment latency and bound the P95.
            let bus = MockMessageBus::new();
            let test_count = 100;

            let mut latencies = Vec::with_capacity(test_count);
            for idx in 0..test_count {
                let started_at = Instant::now();
                let _subscriber = bus.subscribe(&format!("latency.sub.{}", idx)).await.unwrap();
                latencies.push(started_at.elapsed());
            }

            latencies.sort_unstable();
            let p95 = latencies[test_count * 95 / 100];
            println!("订阅延迟 P95: {:?}", p95);

            assert!(p95 < Duration::from_millis(50), "订阅延迟过高: {:?}", p95);
        }
    }
}