//! # Stream-processing integration tests
//!
//! Exercises the complete stream-processing pipeline, including end-to-end scenarios.

use std::time::Duration;
use std::sync::Arc;
use tokio::time::sleep;
use serde_json::json;

use rustcloud_stream::prelude::*;
use rustcloud_bus::memory::InMemoryMessageBus;
use rustcloud_bus::message::MessageBuilder;

/// 完整的端到端流处理测试
/// End-to-end stream pipeline test.
///
/// Publishes three user events (two with `active: true`, one inactive),
/// runs them through a filter -> enrich processor chain, forwards only
/// untagged (active) messages to the output topic, and verifies that
/// exactly the two enriched active-user messages arrive.
#[tokio::test]
async fn test_end_to_end_stream_pipeline() {
    // Create the shared in-memory message bus.
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // Source: listen for user events (wildcard topic).
    let mut source = BusStreamSource::with_defaults(bus.clone(), "user.events.*").await.unwrap();
    source.start().await.unwrap();
    
    // Sink: write processed results.
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "processed.results").await.unwrap();
    
    // Processor chain: filter -> enrich. Note the "filter" stage does not
    // drop messages itself; it tags inactive users with `_filtered` and the
    // dispatch loop below skips tagged messages.
    let filter_processor = Box::new(MapProcessor::new("active_filter", |msg| {
        // Active users pass through unchanged.
        if msg.payload()["active"].as_bool().unwrap_or(false) {
            msg
        } else {
            let mut filtered_msg = msg;
            let mut payload = filtered_msg.payload().clone();
            payload["_filtered"] = json!(true);
            filtered_msg.set_payload(payload);
            filtered_msg
        }
    }));
    
    let enrich_processor = Box::new(MapProcessor::new("enricher", |mut msg| {
        // Enrich only the messages that were not tagged by the filter stage.
        if !msg.payload()["_filtered"].as_bool().unwrap_or(false) {
            let mut payload = msg.payload().clone();
            payload["processed_at"] = json!(chrono::Utc::now().timestamp());
            payload["pipeline"] = json!("user_processing");
            msg.set_payload(payload);
        }
        msg
    }));
    
    let processor_chain = ProcessorChain::new("user_pipeline")
        .add_processor(filter_processor)
        .add_processor(enrich_processor);
    
    // Subscriber used to verify the pipeline output.
    let output_subscriber = bus.subscribe("processed.results").await.unwrap();
    
    // Publish the test messages.
    let publisher = bus.create_publisher().unwrap();
    
    let test_messages = vec![
        MessageBuilder::new()
            .topic("user.events.login")
            .payload(json!({"user_id": 1, "action": "login", "active": true}))
            .build(),
        MessageBuilder::new()
            .topic("user.events.logout")
            .payload(json!({"user_id": 2, "action": "logout", "active": false}))
            .build(),
        MessageBuilder::new()
            .topic("user.events.purchase")
            .payload(json!({"user_id": 3, "action": "purchase", "active": true, "amount": 99.99}))
            .build(),
    ];
    
    // Publish all messages.
    for msg in test_messages {
        publisher.publish(msg).await.unwrap();
    }
    
    // Give the bus time to deliver.
    sleep(Duration::from_millis(100)).await;
    
    // Run each received message through the chain.
    let mut processed_count = 0;
    let mut active_count = 0;
    
    while let Some(Ok(msg)) = source.next().await {
        let processed_msg = processor_chain.process(msg).await.unwrap();
        
        // Only forward messages that were not tagged as filtered.
        if !processed_msg.payload()["_filtered"].as_bool().unwrap_or(false) {
            sink.send(processed_msg).await.unwrap();
            active_count += 1;
        }
        
        processed_count += 1;
        
        if processed_count >= 3 {
            break;
        }
    }
    
    sink.flush().await.unwrap();
    
    // Verify processing counts.
    assert_eq!(processed_count, 3);
    assert_eq!(active_count, 2); // only 2 active users
    
    // Verify the output messages.
    sleep(Duration::from_millis(50)).await;
    let mut output_count = 0;
    
    while let Some(msg) = output_subscriber.receive().await.unwrap() {
        assert_eq!(msg.topic(), "processed.results");
        assert!(msg.payload()["processed_at"].is_number());
        assert_eq!(msg.payload()["pipeline"], json!("user_processing"));
        output_count += 1;
        
        if output_count >= 2 {
            break;
        }
    }
    
    assert_eq!(output_count, 2);
    
    // Clean up.
    source.stop().await.unwrap();
    sink.close().await.unwrap();
}

/// 批处理集成测试
/// Batch processing integration test.
///
/// Publishes 12 messages with a batch size of 5, so only two full batches
/// (10 messages) are expected to be forwarded; the remaining 2 messages stay
/// buffered inside the batch processor (its 1 s flush interval is never
/// awaited here).
#[tokio::test]
async fn test_batch_processing_integration() {
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // Batch configuration: flush every 5 messages or after 1 second.
    let batch_size = 5;
    let batch_processor = BatchProcessor::new("integration_batch", batch_size, Duration::from_secs(1));
    
    let mut source = BusStreamSource::with_defaults(bus.clone(), "batch.input").await.unwrap();
    source.start().await.unwrap();
    
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "batch.output").await.unwrap();
    
    // Publish the test data.
    let publisher = bus.create_publisher().unwrap();
    
    let message_count = 12;
    for i in 0..message_count {
        let msg = MessageBuilder::new()
            .topic("batch.input")
            .payload(json!({"id": i, "data": format!("item_{}", i)}))
            .build();
        publisher.publish(msg).await.unwrap();
    }
    
    sleep(Duration::from_millis(100)).await;
    
    // Drain the source and feed the batch processor.
    //
    // BUG FIX: the loop previously ran `while total_processed < 12`, but with
    // a batch size of 5 only multiples of 5 (at most 10 here) can ever be
    // forwarded, so that condition could never become false and loop exit
    // relied entirely on `source.next()` returning `None` — if the source
    // blocks on an empty stream, the test hangs. Bound the loop by the number
    // of messages *consumed* from the source instead.
    let mut total_processed = 0;
    let mut batch_count = 0;
    let mut consumed = 0;
    
    while consumed < message_count {
        match source.next().await {
            Some(Ok(msg)) => {
                consumed += 1;
                if let Ok(Some(batch)) = batch_processor.process(msg).await {
                    // A full batch was emitted; forward every message in it.
                    for batch_msg in batch {
                        sink.send(batch_msg).await.unwrap();
                        total_processed += 1;
                    }
                    batch_count += 1;
                }
            }
            // `None` or a stream error: nothing more to consume.
            _ => break,
        }
    }
    
    sink.flush().await.unwrap();
    
    // Two full batches of 5 = 10 forwarded messages; 2 remain buffered.
    assert_eq!(total_processed, 10);
    assert_eq!(batch_count, 2);
    
    source.stop().await.unwrap();
    sink.close().await.unwrap();
}

/// 容错机制集成测试
/// Resilience integration test: retry-then-dead-letter behaviour.
///
/// Scenario 1: an operation that fails twice and succeeds on the third
/// attempt must be retried to success. Scenario 2: an operation that always
/// fails must end up in the dead-letter queue.
#[tokio::test]
async fn test_resilience_integration() {
    use std::sync::atomic::{AtomicU32, Ordering};

    let resilience_cfg = StreamErrorHandlingConfig {
        retry: StreamRetryConfig {
            max_attempts: 3,
            backoff: BackoffStrategy::Fixed { delay: Duration::from_millis(10) },
            retry_conditions: vec!["PROCESSOR_ERROR".to_string()],
            timeout: Duration::from_secs(1),
        },
        dead_letter: StreamDeadLetterConfig {
            enabled: true,
            topic: "test.dlq".to_string(),
            max_retries_before_dlq: 2,
            retention_duration: Duration::from_secs(3600),
            capacity: 100,
        },
        error_strategy: ErrorHandlingStrategy::RetryThenDeadLetter,
        circuit_breaker: None,
    };

    let manager = StreamResilienceManager::new(resilience_cfg).await.unwrap();

    let probe = MessageBuilder::new()
        .topic("test.resilience")
        .payload(json!({"test": "resilience_integration"}))
        .build();

    // Scenario 1: fail on attempts 1 and 2, succeed on attempt 3.
    let attempts = Arc::new(AtomicU32::new(0));
    let attempts_in_op = Arc::clone(&attempts);

    let outcome = manager
        .execute_with_resilience(
            move || {
                let attempt_no = attempts_in_op.fetch_add(1, Ordering::SeqCst);
                async move {
                    match attempt_no {
                        0 | 1 => Err(StreamError::processor_error("test", "Temporary failure")),
                        _ => Ok("success_after_retry".to_string()),
                    }
                }
            },
            &probe,
            None,
        )
        .await;

    assert!(outcome.is_ok());
    assert_eq!(outcome.unwrap(), "success_after_retry");
    assert_eq!(attempts.load(Ordering::SeqCst), 3);

    // Statistics after the first (eventually successful) run.
    let after_success = manager.get_stats().await;
    assert_eq!(after_success.total_processed, 1);
    assert_eq!(after_success.successful, 1);
    assert!(after_success.retries > 0);

    // Scenario 2: an operation that never succeeds is dead-lettered.
    let always_failing = manager
        .execute_with_resilience(
            || async { Err::<String, StreamError>(StreamError::processor_error("test", "Always fails")) },
            &probe,
            None,
        )
        .await;

    assert!(always_failing.is_err());

    let after_failure = manager.get_stats().await;
    assert_eq!(after_failure.total_processed, 2);
    assert_eq!(after_failure.successful, 1);
    assert_eq!(after_failure.dead_lettered, 1);

    // The failed message must be visible in the dead-letter queue.
    let dlq = manager.get_dead_letter_messages().await;
    assert_eq!(dlq.len(), 1);
    assert_eq!(dlq[0].error_code, "PROCESSOR_ERROR");
}

/// 聚合处理集成测试
/// Aggregation integration test: per-user transaction totals.
///
/// Feeds six payment events through an `AggregateProcessor` keyed by
/// `user_id`, summing the `amount` field, and checks the final totals.
#[tokio::test]
async fn test_aggregation_integration() {
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // Aggregator: group by user id and sum transaction amounts.
    let aggregator = AggregateProcessor::new(
        "user_transactions",
        // Key extractor — `user_id` is published as a JSON string below, so
        // `as_str` succeeds; anything else falls back to "unknown".
        |msg: &Message| -> StreamResult<String> {
            Ok(msg.payload()["user_id"].as_str().unwrap_or("unknown").to_string())
        },
        // Value extractor — a missing/non-numeric amount counts as 0.0.
        |msg: &Message| -> StreamResult<f64> {
            Ok(msg.payload()["amount"].as_f64().unwrap_or(0.0))
        },
        // Fold function: running sum per key, seeded with 0.0.
        |acc: &f64, value: &f64| acc + value,
        0.0f64,
    );
    
    let mut source = BusStreamSource::with_defaults(bus.clone(), "transactions.*").await.unwrap();
    source.start().await.unwrap();
    
    // Publish the transaction data.
    let publisher = bus.create_publisher().unwrap();
    
    let transactions = vec![
        ("user1", 100.0),
        ("user2", 50.0),
        ("user1", 75.0),
        ("user3", 200.0),
        ("user2", 25.0),
        ("user1", 150.0),
    ];
    
    for (user_id, amount) in transactions {
        let msg = MessageBuilder::new()
            .topic("transactions.payment")
            .payload(json!({"user_id": user_id, "amount": amount, "type": "payment"}))
            .build();
        publisher.publish(msg).await.unwrap();
    }
    
    sleep(Duration::from_millis(100)).await;
    
    // Process all six messages; keep the last aggregation snapshot
    // (presumably a key -> running-total map — confirm against
    // AggregateProcessor's return type).
    let mut processed_count = 0;
    let mut final_aggregation = None;
    
    while processed_count < 6 {
        if let Some(Ok(msg)) = source.next().await {
            let aggregation_result = aggregator.process(msg).await.unwrap();
            final_aggregation = Some(aggregation_result);
            processed_count += 1;
        } else {
            break;
        }
    }
    
    // Verify per-user totals. The sums are exactly representable in f64,
    // so exact equality is safe here.
    let aggregation = final_aggregation.unwrap();
    assert_eq!(*aggregation.get("user1").unwrap(), 325.0); // 100 + 75 + 150
    assert_eq!(*aggregation.get("user2").unwrap(), 75.0);  // 50 + 25
    assert_eq!(*aggregation.get("user3").unwrap(), 200.0); // 200
    
    source.stop().await.unwrap();
}

/// 路由处理集成测试
/// Routing integration test.
///
/// Routes system events to per-type/per-priority output topics derived from
/// the `type` and `priority` payload fields, then checks how many messages
/// went to each sink.
#[tokio::test]
async fn test_routing_integration() {
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // Router: compute the target topic "output.<type>.<priority>" from the payload.
    let router = RouteProcessor::new(
        "event_router",
        |msg: &Message| {
            let event_type = msg.payload()["type"].as_str().unwrap_or("unknown");
            let priority = msg.payload()["priority"].as_str().unwrap_or("normal");
            format!("output.{}.{}", event_type, priority)
        }
    );
    
    let mut source = BusStreamSource::with_defaults(bus.clone(), "events.*").await.unwrap();
    source.start().await.unwrap();
    
    // Two output sinks. NOTE(review): "warning" events are sent through
    // `normal_sink`, whose configured topic is "output.info.normal" — confirm
    // whether the sink's topic or the routed topic wins on publish.
    let mut high_priority_sink = BusStreamSink::with_defaults(bus.clone(), "output.error.high").await.unwrap();
    let mut normal_sink = BusStreamSink::with_defaults(bus.clone(), "output.info.normal").await.unwrap();
    
    // Publish events of different types and priorities.
    let publisher = bus.create_publisher().unwrap();
    
    let events = vec![
        ("error", "high", "System failure detected"),
        ("info", "normal", "User login successful"),
        ("warning", "normal", "Low disk space"),
        ("error", "high", "Database connection lost"),
    ];
    
    for (event_type, priority, message) in events {
        let msg = MessageBuilder::new()
            .topic("events.system")
            .payload(json!({
                "type": event_type,
                "priority": priority,
                "message": message,
                "timestamp": chrono::Utc::now().timestamp()
            }))
            .build();
        publisher.publish(msg).await.unwrap();
    }
    
    sleep(Duration::from_millis(100)).await;
    
    // Dispatch each message to a sink based on its computed route.
    let mut processed_count = 0;
    let mut high_priority_count = 0;
    let mut normal_count = 0;
    
    while processed_count < 4 {
        if let Some(Ok(msg)) = source.next().await {
            let route_result = router.process(msg).await.unwrap();
            
            match route_result.route.as_str() {
                "output.error.high" => {
                    high_priority_sink.send(route_result.message).await.unwrap();
                    high_priority_count += 1;
                }
                "output.info.normal" | "output.warning.normal" => {
                    normal_sink.send(route_result.message).await.unwrap();
                    normal_count += 1;
                }
                _ => {
                    // Other routes: ignored by this test.
                }
            }
            
            processed_count += 1;
        } else {
            break;
        }
    }
    
    high_priority_sink.flush().await.unwrap();
    normal_sink.flush().await.unwrap();
    
    // Verify routing counts.
    assert_eq!(high_priority_count, 2); // 2 high-priority errors
    assert_eq!(normal_count, 2);        // 1 info + 1 warning
    
    source.stop().await.unwrap();
    high_priority_sink.close().await.unwrap();
    normal_sink.close().await.unwrap();
}

/// 并发处理集成测试
/// Concurrent processing integration test.
///
/// Wraps a deliberately slow (50 ms per message) async processor in a
/// `ParallelProcessor` with 4 workers and checks that 20 messages complete
/// well under the ~1 s a sequential run would take.
#[tokio::test]
async fn test_concurrent_processing_integration() {
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // Slow processor used to make the concurrency speedup observable.
    let slow_processor = Box::new(AsyncMapProcessor::new(
        "slow_processor",
        |mut msg: Message| async move {
            // Simulate expensive processing.
            tokio::time::sleep(Duration::from_millis(50)).await;
            
            let mut payload = msg.payload().clone();
            payload["processed"] = json!(true);
            payload["processing_time"] = json!(50);
            msg.set_payload(payload);
            
            Ok(msg)
        }
    ));
    
    let parallel_processor = ParallelProcessor::new("parallel_test", slow_processor, 4);
    
    let mut source = BusStreamSource::with_defaults(bus.clone(), "parallel.input").await.unwrap();
    source.start().await.unwrap();
    
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "parallel.output").await.unwrap();
    
    // Publish a batch of messages.
    let publisher = bus.create_publisher().unwrap();
    let message_count = 20;
    
    for i in 0..message_count {
        let msg = MessageBuilder::new()
            .topic("parallel.input")
            .payload(json!({"id": i, "data": format!("message_{}", i)}))
            .build();
        publisher.publish(msg).await.unwrap();
    }
    
    sleep(Duration::from_millis(100)).await;
    
    // Collect all messages up front so they can be processed in parallel.
    let mut messages = Vec::new();
    let mut collected = 0;
    
    while collected < message_count {
        if let Some(Ok(msg)) = source.next().await {
            messages.push(msg);
            collected += 1;
        } else {
            break;
        }
    }
    
    // Process in parallel and time it.
    let start_time = std::time::Instant::now();
    let processed_messages = parallel_processor.process(messages).await.unwrap();
    let processing_duration = start_time.elapsed();
    
    // Forward the results.
    for msg in processed_messages {
        sink.send(msg).await.unwrap();
    }
    sink.flush().await.unwrap();
    
    // Verify the parallel speedup:
    // 20 messages * 50 ms = 1000 ms sequentially; with 4-way parallelism the
    // ideal time is ~250-300 ms.
    // NOTE(review): wall-clock assertion — 600 ms leaves ~2x headroom but may
    // still be flaky on a heavily loaded CI machine.
    assert_eq!(collected, message_count);
    assert!(processing_duration < Duration::from_millis(600));
    
    println!("并行处理 {} 个消息耗时: {:?}", message_count, processing_duration);
    
    source.stop().await.unwrap();
    sink.close().await.unwrap();
}

/// 完整的数据管道性能测试
/// Full-pipeline performance test.
///
/// Publishes 1000 messages (90% marked active), filters inactive ones inline,
/// enriches the rest via a `MapProcessor`, and asserts basic throughput
/// expectations (>= 100 msg/s, >= 80% active).
#[tokio::test]
async fn test_full_pipeline_performance() {
    let bus = Arc::new(InMemoryMessageBus::new().await.unwrap());
    
    // FIX: this FilterProcessor was bound to an unused variable, producing a
    // compiler warning. It cannot currently be slotted into a ProcessorChain
    // (type mismatch — see note below), so keep the construction as a
    // compile-level smoke test but mark it intentionally unused; the same
    // predicate is applied inline in the processing loop.
    let _filter = Box::new(FilterProcessor::with_predicate(|msg: &Message| {
        msg.payload()["active"].as_bool().unwrap_or(true)
    }));
    
    // Enrichment stage: stamp each surviving message with a timestamp.
    let mapper = Box::new(MapProcessor::with_mapper(|mut msg: Message| {
        let mut payload = msg.payload().clone();
        payload["processed_at"] = json!(chrono::Utc::now().timestamp());
        msg.set_payload(payload);
        msg
    }));
    
    // NOTE: FilterProcessor's output cannot be adapted into ProcessorChain
    // here due to the type mismatch, so this test uses simplified inline
    // filtering instead of a full chain.
    
    let mut source = BusStreamSource::with_defaults(bus.clone(), "perf.input").await.unwrap();
    source.start().await.unwrap();
    
    let mut sink = BusStreamSink::with_defaults(bus.clone(), "perf.output").await.unwrap();
    
    // Publish a large batch of test data.
    let publisher = bus.create_publisher().unwrap();
    let message_count = 1000;
    
    let start_publish = std::time::Instant::now();
    for i in 0..message_count {
        let msg = MessageBuilder::new()
            .topic("perf.input")
            .payload(json!({
                "id": i,
                "active": i % 10 != 0, // 90% of messages are active
                "data": format!("performance_test_{}", i),
                "timestamp": chrono::Utc::now().timestamp()
            }))
            .build();
        publisher.publish(msg).await.unwrap();
    }
    let publish_duration = start_publish.elapsed();
    
    sleep(Duration::from_millis(200)).await;
    
    // Process: inline filter + enrichment mapper.
    let start_process = std::time::Instant::now();
    let mut processed_count = 0;
    let mut active_count = 0;
    
    while processed_count < message_count {
        if let Some(Ok(msg)) = source.next().await {
            // Inline filter (same predicate as `_filter` above).
            if msg.payload()["active"].as_bool().unwrap_or(true) {
                let processed_msg = mapper.process(msg).await.unwrap();
                sink.send(processed_msg).await.unwrap();
                active_count += 1;
            }
            processed_count += 1;
        } else {
            break;
        }
    }
    
    sink.flush().await.unwrap();
    let process_duration = start_process.elapsed();
    
    // Throughput statistics.
    let publish_throughput = message_count as f64 / publish_duration.as_secs_f64();
    let process_throughput = processed_count as f64 / process_duration.as_secs_f64();
    
    println!("性能测试结果:");
    println!("  发布 {} 条消息耗时: {:?} (吞吐量: {:.2} msg/s)", 
             message_count, publish_duration, publish_throughput);
    println!("  处理 {} 条消息耗时: {:?} (吞吐量: {:.2} msg/s)", 
             processed_count, process_duration, process_throughput);
    println!("  活跃消息数: {} (过滤率: {:.1}%)", 
             active_count, (1.0 - active_count as f64 / processed_count as f64) * 100.0);
    
    // Performance assertions. NOTE(review): wall-clock thresholds may be
    // flaky on a heavily loaded CI machine.
    assert_eq!(processed_count, message_count);
    assert!(active_count > message_count * 8 / 10); // at least 80% active
    assert!(process_throughput > 100.0); // at least 100 msg/s
    
    source.stop().await.unwrap();
    sink.close().await.unwrap();
}