//! # 性能和压力测试脚本
//! 
//! 综合性能测试，包括吞吐量、延迟、内存使用等指标

use std::time::{Duration, Instant};
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use tokio::time::sleep;
use serde_json::json;

use rustcloud_stream::prelude::*;
use rustcloud_bus::memory::InMemoryMessageBus;
use rustcloud_bus::message::MessageBuilder;

/// Configuration shared by all performance-test runs.
#[derive(Debug, Clone)]
pub struct PerformanceTestConfig {
    /// Total number of test messages to publish and process.
    pub message_count: usize,
    /// Number of messages accumulated before a batch is processed.
    pub batch_size: usize,
    /// Worker count for the parallel-processing test.
    pub parallelism: usize,
    /// Wall-clock duration of the sustained stress test.
    pub test_duration: Duration,
    /// Pause after publishing warm-up messages, before the timed run.
    pub warm_up_duration: Duration,
}

impl Default for PerformanceTestConfig {
    /// Moderate defaults: 10k messages, batches of 100, 4-way parallelism,
    /// a 30s stress window and a 5s warm-up pause.
    fn default() -> Self {
        let test_duration = Duration::from_secs(30);
        let warm_up_duration = Duration::from_secs(5);
        Self {
            message_count: 10_000,
            batch_size: 100,
            parallelism: 4,
            test_duration,
            warm_up_duration,
        }
    }
}

/// Aggregated metrics from a single performance-test run.
#[derive(Debug, Clone)]
pub struct PerformanceTestResult {
    /// Total messages processed (successes + failures).
    pub total_messages: u64,
    /// Messages processed successfully.
    pub successful_messages: u64,
    /// Messages whose processing failed.
    pub failed_messages: u64,
    /// Wall-clock duration of the measured phase.
    pub duration: Duration,
    /// Messages per second over `duration`.
    pub throughput_msg_per_sec: f64,
    /// Mean per-message latency in milliseconds.
    pub avg_latency_ms: f64,
    /// 95th-percentile latency in milliseconds.
    pub p95_latency_ms: f64,
    /// 99th-percentile latency in milliseconds.
    pub p99_latency_ms: f64,
    /// Estimated memory usage in MB (rough per-message estimate, not a measurement).
    pub memory_usage_mb: f64,
}

impl PerformanceTestResult {
    /// Print a human-readable summary of this result to stdout.
    pub fn print_summary(&self) {
        println!("=== 性能测试结果 ===");
        println!("总消息数: {}", self.total_messages);
        println!("成功处理: {}", self.successful_messages);
        println!("失败处理: {}", self.failed_messages);
        println!("测试时长: {:?}", self.duration);
        println!("吞吐量: {:.2} msg/s", self.throughput_msg_per_sec);
        println!("平均延迟: {:.2} ms", self.avg_latency_ms);
        println!("P95延迟: {:.2} ms", self.p95_latency_ms);
        println!("P99延迟: {:.2} ms", self.p99_latency_ms);
        println!("内存使用: {:.2} MB", self.memory_usage_mb);
        // Guard against division by zero: an empty run would otherwise print "NaN%".
        let success_rate = if self.total_messages == 0 {
            0.0
        } else {
            (self.successful_messages as f64 / self.total_messages as f64) * 100.0
        };
        println!("成功率: {:.2}%", success_rate);
    }
}

/// Latency statistics collector.
///
/// Samples are stored behind an `Arc<Mutex<..>>` so clones of the handle
/// (or the collector itself wrapped in an outer `Arc`) share one buffer.
#[derive(Debug)]
pub struct LatencyCollector {
    latencies: Arc<std::sync::Mutex<Vec<Duration>>>,
}

impl LatencyCollector {
    /// Create an empty collector.
    pub fn new() -> Self {
        Self {
            latencies: Arc::new(std::sync::Mutex::new(Vec::new())),
        }
    }

    /// Record one latency sample.
    pub fn record(&self, latency: Duration) {
        // Recover from a poisoned lock instead of silently dropping the
        // sample: a panic in another recording thread should not lose
        // performance data (the Vec itself cannot be left inconsistent).
        self.latencies
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .push(latency);
    }

    /// Return `(average, p95, p99)` latency in milliseconds.
    ///
    /// Returns zeros when no samples have been recorded. Sorts the sample
    /// buffer in place as a side effect.
    pub fn calculate_percentiles(&self) -> (f64, f64, f64) {
        // As in `record`, recover from poisoning rather than reporting zeros.
        let mut latencies = self
            .latencies
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        if latencies.is_empty() {
            return (0.0, 0.0, 0.0);
        }

        // sort_unstable: faster, non-allocating; stability is irrelevant here.
        latencies.sort_unstable();
        let len = latencies.len();

        let avg = latencies.iter().map(|d| d.as_secs_f64() * 1000.0).sum::<f64>() / len as f64;
        // Truncated-rank percentile indices, clamped to the last element.
        let p95_idx = ((len as f64 * 0.95) as usize).min(len - 1);
        let p99_idx = ((len as f64 * 0.99) as usize).min(len - 1);

        let p95 = latencies[p95_idx].as_secs_f64() * 1000.0;
        let p99 = latencies[p99_idx].as_secs_f64() * 1000.0;

        (avg, p95, p99)
    }
}

/// Build a synthetic test message with roughly `size` bytes of filler payload.
fn create_test_message(id: u64, size: usize) -> Message {
    // Filler body of `size` 'x' characters.
    let filler: String = std::iter::repeat('x').take(size).collect();
    let payload = json!({
        "id": id,
        "data": filler,
        "timestamp": chrono::Utc::now().timestamp_nanos(),
        "active": id % 10 != 0,
        "amount": id as f64 * 1.23
    });
    MessageBuilder::new()
        .topic("perf.test")
        .payload(payload)
        .build()
}

/// Throughput test: publishes `config.message_count` messages through an
/// in-memory bus, runs them through a simple map processor, and measures
/// end-to-end processing rate and per-message latency.
pub async fn throughput_test(config: PerformanceTestConfig) -> PerformanceTestResult {
    println!("开始吞吐量测试 - {} 条消息", config.message_count);
    
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    let mut source = BusStreamSource::with_defaults(bus.clone(), "perf.*").await.unwrap();
    source.start().await.unwrap();
    
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "perf.output").await.unwrap();
    
    // Simple processor that tags each message as processed.
    let processor = MapProcessor::with_mapper(|mut msg: Message| {
        let mut payload = msg.payload().clone();
        payload["processed"] = json!(true);
        msg.set_payload(payload);
        msg
    });
    
    let total_processed = Arc::new(AtomicU64::new(0));
    let successful = Arc::new(AtomicU64::new(0));
    let failed = Arc::new(AtomicU64::new(0));
    let latency_collector = Arc::new(LatencyCollector::new());
    
    let publisher = bus.create_publisher().unwrap();
    
    // Warm-up phase.
    // NOTE(review): these 100 warm-up messages also match the "perf.*"
    // subscription, so the first 100 messages consumed in the timed loop
    // below are warm-up traffic — confirm this is intended for the
    // measurement.
    println!("预热阶段...");
    for i in 0..100 {
        let msg = create_test_message(i, 100);
        publisher.publish(msg).await.unwrap();
    }
    sleep(config.warm_up_duration).await;
    
    // Actual test run.
    println!("开始正式测试...");
    
    // Publish the test messages.
    for i in 0..config.message_count {
        let msg = create_test_message(i as u64, 100);
        publisher.publish(msg).await.unwrap();
    }
    
    // Consume and process messages, timing each one individually.
    let process_start = Instant::now();
    let mut processed = 0;
    
    while processed < config.message_count {
        if let Some(Ok(msg)) = source.next().await {
            let msg_start = Instant::now();
            
            match processor.process(msg).await {
                Ok(processed_msg) => {
                    sink.send(processed_msg).await.unwrap();
                    successful.fetch_add(1, Ordering::Relaxed);
                    
                    let latency = msg_start.elapsed();
                    latency_collector.record(latency);
                }
                Err(_) => {
                    failed.fetch_add(1, Ordering::Relaxed);
                }
            }
            
            total_processed.fetch_add(1, Ordering::Relaxed);
            processed += 1;
        }
    }
    
    sink.flush().await.unwrap();
    let duration = process_start.elapsed();
    
    // Aggregate statistics.
    let total = total_processed.load(Ordering::Relaxed);
    let success = successful.load(Ordering::Relaxed);
    let failures = failed.load(Ordering::Relaxed);
    let throughput = total as f64 / duration.as_secs_f64();
    let (avg_latency, p95_latency, p99_latency) = latency_collector.calculate_percentiles();
    
    // Simplified memory estimate: assumes ~1 KB per message, not a measurement.
    let memory_usage = (total * 1024) as f64 / 1024.0 / 1024.0;
    
    source.stop().await.unwrap();
    sink.close().await.unwrap();
    
    PerformanceTestResult {
        total_messages: total,
        successful_messages: success,
        failed_messages: failures,
        duration,
        throughput_msg_per_sec: throughput,
        avg_latency_ms: avg_latency,
        p95_latency_ms: p95_latency,
        p99_latency_ms: p99_latency,
        memory_usage_mb: memory_usage,
    }
}

/// Concurrency test: feeds messages in batches through a `ParallelProcessor`
/// with `config.parallelism` workers, each simulating ~100µs of work per
/// message, and measures the resulting batch-processing throughput.
pub async fn concurrency_test(config: PerformanceTestConfig) -> PerformanceTestResult {
    println!("开始并发处理测试 - {} 并发度", config.parallelism);
    
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // Slow processor to simulate realistic per-message work.
    let slow_processor = Box::new(AsyncMapProcessor::new(
        "slow_processor",
        |mut msg: Message| async move {
            // Simulate some processing time.
            tokio::time::sleep(Duration::from_micros(100)).await;
            
            let mut payload = msg.payload().clone();
            payload["processed"] = json!(true);
            payload["processing_time"] = json!(100);
            msg.set_payload(payload);
            
            Ok(msg)
        }
    ));
    
    let parallel_processor = ParallelProcessor::new(
        "concurrency_test",
        slow_processor,
        config.parallelism,
    );
    
    let mut source = BusStreamSource::with_defaults(bus.clone(), "concurrency.*").await.unwrap();
    source.start().await.unwrap();
    
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "concurrency.output").await.unwrap();
    
    // Publish the test messages.
    let publisher = bus.create_publisher().unwrap();
    for i in 0..config.message_count {
        let msg = create_test_message(i as u64, 200);
        publisher.publish(msg).await.unwrap();
    }
    
    sleep(Duration::from_millis(100)).await;
    
    // Collect messages and process them batch by batch.
    let start_time = Instant::now();
    let mut messages = Vec::new();
    let mut collected = 0;
    
    while collected < config.message_count {
        if let Some(Ok(msg)) = source.next().await {
            messages.push(msg);
            collected += 1;
            
            // Process once a full batch has accumulated. `mem::take` moves
            // the batch out and leaves an empty Vec behind, avoiding the
            // deep copy of every message that clone-then-clear incurred.
            if messages.len() >= config.batch_size {
                let batch = std::mem::take(&mut messages);
                
                let processed_batch = parallel_processor.process(batch).await.unwrap();
                for processed_msg in processed_batch {
                    sink.send(processed_msg).await.unwrap();
                }
            }
        }
    }
    
    // Process any remaining partial batch.
    if !messages.is_empty() {
        let processed_batch = parallel_processor.process(messages).await.unwrap();
        for processed_msg in processed_batch {
            sink.send(processed_msg).await.unwrap();
        }
    }
    
    sink.flush().await.unwrap();
    let duration = start_time.elapsed();
    
    let throughput = config.message_count as f64 / duration.as_secs_f64();
    // Simplified memory estimate: ~200 B payload per message.
    let memory_usage = (config.message_count * 200) as f64 / 1024.0 / 1024.0;
    
    source.stop().await.unwrap();
    sink.close().await.unwrap();
    
    PerformanceTestResult {
        total_messages: config.message_count as u64,
        successful_messages: config.message_count as u64,
        failed_messages: 0,
        duration,
        throughput_msg_per_sec: throughput,
        avg_latency_ms: 0.1, // simplified placeholder values, not measured
        p95_latency_ms: 0.2,
        p99_latency_ms: 0.5,
        memory_usage_mb: memory_usage,
    }
}

/// Resilience-mechanism performance test: runs a fixed scenario mix
/// (immediate success / success-after-retry / permanent failure) through
/// the retry + dead-letter pipeline and measures overall throughput.
pub async fn resilience_performance_test(config: PerformanceTestConfig) -> PerformanceTestResult {
    println!("开始容错机制性能测试");
    
    // Retry up to 3 attempts with a tiny fixed backoff, then dead-letter.
    let resilience_config = StreamErrorHandlingConfig {
        retry: StreamRetryConfig {
            max_attempts: 3,
            backoff: BackoffStrategy::Fixed { delay: Duration::from_micros(1) },
            retry_conditions: vec!["PROCESSOR_ERROR".to_string()],
            timeout: Duration::from_secs(1),
        },
        dead_letter: StreamDeadLetterConfig {
            enabled: true,
            topic: "perf.dlq".to_string(),
            max_retries_before_dlq: 2,
            retention_duration: Duration::from_secs(3600),
            capacity: 10000,
        },
        error_strategy: ErrorHandlingStrategy::RetryThenDeadLetter,
        circuit_breaker: None,
    };
    
    let manager = StreamResilienceManager::new(resilience_config).await.unwrap();
    
    let successful = Arc::new(AtomicU64::new(0));
    let failed = Arc::new(AtomicU64::new(0));
    
    let start_time = Instant::now();
    
    // Scenario mix: (scenario name, percentage of config.message_count).
    let scenarios = vec![
        ("success", 70),      // 70% succeed immediately
        ("retry_success", 20), // 20% succeed after one retry
        ("failure", 10),       // 10% always fail
    ];
    
    let mut current_id = 0;
    for (scenario, percentage) in scenarios {
        // Integer division: totals may sum to slightly less than message_count.
        let count = (config.message_count * percentage) / 100;
        
        for i in 0..count {
            let msg = create_test_message(current_id, 50);
            current_id += 1;
            
            let result = match scenario {
                "success" => {
                    manager.execute_with_resilience(
                        || async { Ok::<String, StreamError>(format!("success_{}", i)) },
                        &msg,
                        None,
                    ).await
                }
                "retry_success" => {
                    // Per-message attempt counter: fail the first attempt,
                    // succeed on the retry.
                    let counter = Arc::new(std::sync::atomic::AtomicU32::new(0));
                    let counter_clone = counter.clone();
                    manager.execute_with_resilience(
                        move || {
                            let count = counter_clone.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
                            async move {
                                if count < 1 {
                                    Err(StreamError::processor_error("test", "Retry needed"))
                                } else {
                                    Ok(format!("retry_success_{}", i))
                                }
                            }
                        },
                        &msg,
                        None,
                    ).await
                }
                "failure" => {
                    manager.execute_with_resilience(
                        || async { Err::<String, StreamError>(StreamError::processor_error("test", "Always fails")) },
                        &msg,
                        None,
                    ).await
                }
                _ => unreachable!(),
            };
            
            match result {
                Ok(_) => successful.fetch_add(1, Ordering::Relaxed),
                Err(_) => failed.fetch_add(1, Ordering::Relaxed),
            };
        }
    }
    
    let duration = start_time.elapsed();
    let total = successful.load(Ordering::Relaxed) + failed.load(Ordering::Relaxed);
    let throughput = total as f64 / duration.as_secs_f64();
    
    // Fetch resilience statistics from the manager.
    let resilience_stats = manager.get_stats().await;
    
    println!("容错统计: 总处理={}, 成功={}, 重试={}, 死信={}", 
             resilience_stats.total_processed,
             resilience_stats.successful,
             resilience_stats.retries,
             resilience_stats.dead_lettered);
    
    PerformanceTestResult {
        total_messages: total,
        successful_messages: successful.load(Ordering::Relaxed),
        failed_messages: failed.load(Ordering::Relaxed),
        duration,
        throughput_msg_per_sec: throughput,
        avg_latency_ms: 1.0, // simplified placeholder values, not measured
        p95_latency_ms: 2.0,
        p99_latency_ms: 5.0,
        memory_usage_mb: (total * 50) as f64 / 1024.0 / 1024.0, // ~50 B/msg estimate
    }
}

/// Stress test: sustained load for `config.test_duration`, alternating
/// publish and process batches through a two-stage processor chain
/// (filter-tagging stage, then enrichment stage).
pub async fn stress_test(config: PerformanceTestConfig) -> PerformanceTestResult {
    println!("开始压力测试 - 持续 {:?}", config.test_duration);
    
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    let mut source = BusStreamSource::with_defaults(bus.clone(), "stress.*").await.unwrap();
    source.start().await.unwrap();
    
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "stress.output").await.unwrap();
    
    // Processor chain: tag inactive messages as filtered, then enrich the rest.
    let processor_chain = ProcessorChain::new("stress_test")
        .add_processor(Box::new(MapProcessor::new("filter", |msg| {
            if msg.payload()["active"].as_bool().unwrap_or(false) {
                msg
            } else {
                // Inactive messages are tagged rather than dropped so the
                // chain's output count stays predictable.
                let mut filtered_msg = msg;
                let mut payload = filtered_msg.payload().clone();
                payload["_filtered"] = json!(true);
                filtered_msg.set_payload(payload);
                filtered_msg
            }
        })))
        .add_processor(Box::new(MapProcessor::new("enricher", |mut msg| {
            // Only enrich messages that passed the filter stage.
            if !msg.payload()["_filtered"].as_bool().unwrap_or(false) {
                let mut payload = msg.payload().clone();
                payload["processed_at"] = json!(chrono::Utc::now().timestamp());
                msg.set_payload(payload);
            }
            msg
        })));
    
    let total_processed = Arc::new(AtomicU64::new(0));
    let successful = Arc::new(AtomicU64::new(0));
    
    let publisher = bus.create_publisher().unwrap();
    let start_time = Instant::now();
    
    // Continuously publish and process until the configured duration elapses.
    let mut message_id = 0;
    while start_time.elapsed() < config.test_duration {
        // Publish one batch of messages.
        for _ in 0..config.batch_size {
            let msg = create_test_message(message_id, 150);
            publisher.publish(msg).await.unwrap();
            message_id += 1;
        }
        
        // Process one batch of messages.
        for _ in 0..config.batch_size {
            if let Some(Ok(msg)) = source.next().await {
                match processor_chain.process(msg).await {
                    Ok(processed_msg) => {
                        // Filtered messages count as successful but are not forwarded.
                        if !processed_msg.payload()["_filtered"].as_bool().unwrap_or(false) {
                            sink.send(processed_msg).await.unwrap();
                        }
                        successful.fetch_add(1, Ordering::Relaxed);
                    }
                    Err(_) => {}
                }
                total_processed.fetch_add(1, Ordering::Relaxed);
            }
        }
        
        // Brief pause to avoid overloading the bus.
        sleep(Duration::from_millis(1)).await;
    }
    
    sink.flush().await.unwrap();
    let duration = start_time.elapsed();
    
    let total = total_processed.load(Ordering::Relaxed);
    let success = successful.load(Ordering::Relaxed);
    let throughput = total as f64 / duration.as_secs_f64();
    
    source.stop().await.unwrap();
    sink.close().await.unwrap();
    
    PerformanceTestResult {
        total_messages: total,
        successful_messages: success,
        failed_messages: total - success,
        duration,
        throughput_msg_per_sec: throughput,
        avg_latency_ms: 1.5, // fixed placeholder values, not measured
        p95_latency_ms: 3.0,
        p99_latency_ms: 8.0,
        memory_usage_mb: (total * 150) as f64 / 1024.0 / 1024.0, // ~150 B/msg estimate
    }
}

#[tokio::main]
async fn main() {
    println!("🚀 RustCloud Stream 性能测试套件");
    println!("================================");
    
    // One shared configuration for all four benchmark runs.
    let config = PerformanceTestConfig {
        message_count: 5000,
        batch_size: 50,
        parallelism: 4,
        test_duration: Duration::from_secs(10),
        warm_up_duration: Duration::from_secs(2),
    };
    
    // Run each benchmark in sequence, printing its summary as it finishes.
    println!("\n📊 1. 吞吐量测试");
    let throughput_result = throughput_test(config.clone()).await;
    throughput_result.print_summary();
    
    println!("\n⚡ 2. 并发处理测试");
    let concurrency_result = concurrency_test(config.clone()).await;
    concurrency_result.print_summary();
    
    println!("\n🛡️ 3. 容错机制性能测试");
    let resilience_result = resilience_performance_test(config.clone()).await;
    resilience_result.print_summary();
    
    println!("\n💪 4. 压力测试");
    let stress_result = stress_test(config.clone()).await;
    stress_result.print_summary();
    
    // Overall summary: one throughput line per benchmark, then the mean.
    println!("\n📈 总体性能总结");
    println!("================");
    let summary = [
        ("吞吐量测试: ", throughput_result.throughput_msg_per_sec),
        ("并发测试:   ", concurrency_result.throughput_msg_per_sec),
        ("容错测试:   ", resilience_result.throughput_msg_per_sec),
        ("压力测试:   ", stress_result.throughput_msg_per_sec),
    ];
    for (label, throughput) in summary.iter() {
        println!("{}{:.2} msg/s", label, throughput);
    }
    
    let avg_throughput = summary.iter().map(|(_, t)| t).sum::<f64>() / summary.len() as f64;
    println!("平均吞吐量: {:.2} msg/s", avg_throughput);
    
    println!("\n✅ 性能测试完成！");
}