//! # 容错机制和错误处理单元测试
//! 
//! 测试死信队列、重试机制、错误处理等容错功能

use std::time::Duration;
use std::sync::Arc;
use tokio::time::sleep;
use serde_json::json;

use rustcloud_stream::prelude::*;
use rustcloud_bus::message::MessageBuilder;

#[tokio::test]
async fn test_stream_resilience_manager_success() {
    let config = StreamErrorHandlingConfig::default();
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.success")
        .with_payload(json!({"operation": "test", "value": 42}))
        .build();

    // 测试成功的操作
    let result = manager.execute_with_resilience(
        || async { Ok::<String, StreamError>("operation_success".to_string()) },
        &message,
        None,
    ).await;

    assert!(result.is_ok());
    assert_eq!(result.unwrap(), "operation_success");

    // 检查统计信息
    let stats = manager.get_stats().await;
    assert_eq!(stats.total_processed, 1);
    assert_eq!(stats.successful, 1);
    assert_eq!(stats.retries, 0);
    assert_eq!(stats.dead_lettered, 0);
    assert_eq!(stats.timeouts, 0);
}

#[tokio::test]
async fn test_stream_resilience_manager_retry_then_success() {
    let mut config = StreamErrorHandlingConfig::default();
    config.retry.max_attempts = 3;
    config.retry.timeout = Duration::from_secs(1);
    
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.retry")
        .with_payload(json!({"operation": "retry_test"}))
        .build();

    // 模拟前两次失败，第三次成功的操作
    let counter = Arc::new(std::sync::atomic::AtomicU32::new(0));
    let counter_clone = counter.clone();
    
    let result = manager.execute_with_resilience(
        move || {
            let count = counter_clone.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
            async move {
                if count < 2 {
                    Err(StreamError::network_error("Temporary network failure"))
                } else {
                    Ok("success_after_retry".to_string())
                }
            }
        },
        &message,
        None,
    ).await;

    assert!(result.is_ok());
    assert_eq!(result.unwrap(), "success_after_retry");
    
    // 验证重试次数
    assert_eq!(counter.load(std::sync::atomic::Ordering::SeqCst), 3);

    let stats = manager.get_stats().await;
    assert_eq!(stats.total_processed, 1);
    assert_eq!(stats.successful, 1);
    assert!(stats.retries > 0);
}

#[tokio::test]
async fn test_stream_resilience_manager_max_retries_exceeded() {
    let mut config = StreamErrorHandlingConfig::default();
    config.retry.max_attempts = 2;
    config.error_strategy = ErrorHandlingStrategy::RetryThenDeadLetter;
    
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.max_retries")
        .with_payload(json!({"operation": "always_fail"}))
        .build();

    // 模拟总是失败的操作
    let result = manager.execute_with_resilience(
        || async { Err::<String, StreamError>(StreamError::processor_error("test", "Always fails")) },
        &message,
        None,
    ).await;

    assert!(result.is_err());

    let stats = manager.get_stats().await;
    assert_eq!(stats.total_processed, 1);
    assert_eq!(stats.successful, 0);
    assert_eq!(stats.dead_lettered, 1);
    
    // 检查死信队列
    let dlq_messages = manager.get_dead_letter_messages().await;
    assert_eq!(dlq_messages.len(), 1);
    assert_eq!(dlq_messages[0].error_code, "PROCESSOR_ERROR");
}

#[tokio::test]
async fn test_stream_resilience_manager_timeout() {
    let mut config = StreamErrorHandlingConfig::default();
    config.retry.timeout = Duration::from_millis(50);
    config.error_strategy = ErrorHandlingStrategy::RetryThenDeadLetter;
    
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.timeout")
        .with_payload(json!({"operation": "slow_operation"}))
        .build();

    // 模拟超时的操作
    let result = manager.execute_with_resilience(
        || async {
            sleep(Duration::from_millis(100)).await; // 超过超时时间
            Ok::<String, StreamError>("should_not_reach".to_string())
        },
        &message,
        None,
    ).await;

    assert!(result.is_err());

    let stats = manager.get_stats().await;
    assert_eq!(stats.total_processed, 1);
    assert_eq!(stats.successful, 0);
    assert_eq!(stats.timeouts, 1);
    assert_eq!(stats.dead_lettered, 1);
}

#[tokio::test]
async fn test_dead_letter_message_creation() {
    let message = MessageBuilder::new()
        .with_topic("test.dlq")
        .with_payload(json!({"id": 123, "data": "test_data"}))
        .build();

    let error = StreamError::processor_error("test_processor", "Processing failed");
    
    let dl_message = DeadLetterMessage::new(message.clone(), &error, 3, None);

    assert_eq!(dl_message.original_message.id(), message.id());
    assert_eq!(dl_message.error_code, "PROCESSOR_ERROR");
    assert!(dl_message.error.contains("Processing failed"));
    assert_eq!(dl_message.retry_count, 3);
    assert!(dl_message.last_attempt <= dl_message.dead_letter_time);
}

#[tokio::test]
async fn test_stream_retryable() {
    let config = StreamRetryConfig {
        max_attempts: 3,
        strategy: RetryStrategy::FixedDelay,
        backoff: BackoffStrategy::Exponential {
            initial_delay: Duration::from_millis(10),
            max_delay: Duration::from_secs(1),
            multiplier: 2.0,
        },
        retry_conditions: vec![
            "TIMEOUT_ERROR".to_string(),
            "NETWORK_ERROR".to_string(),
        ],
        timeout: Duration::from_secs(5),
    };

    let retryable = DefaultStreamRetryable::new(config);

    // 测试可重试的错误
    let timeout_error = StreamError::timeout_error("test", 1000);
    assert!(retryable.should_retry(&timeout_error));

    let network_error = StreamError::network_error("Connection failed");
    assert!(retryable.should_retry(&network_error));

    // 测试不可重试的错误
    let config_error = StreamError::config_error("Invalid config");
    assert!(!retryable.should_retry(&config_error));

    let serialization_error = StreamError::serialization_error("Invalid JSON");
    assert!(!retryable.should_retry(&serialization_error));

    // 测试退避延迟
    let delay1 = retryable.get_retry_delay(0);
    let delay2 = retryable.get_retry_delay(1);
    let delay3 = retryable.get_retry_delay(2);

    assert_eq!(delay1, Duration::from_millis(10));
    assert_eq!(delay2, Duration::from_millis(20));
    assert_eq!(delay3, Duration::from_millis(40));
}

#[tokio::test]
async fn test_error_handling_strategies() {
    // 测试不同的错误处理策略
    let strategies = vec![
        ErrorHandlingStrategy::Ignore,
        ErrorHandlingStrategy::Fail,
        ErrorHandlingStrategy::Retry,
        ErrorHandlingStrategy::RetryThenDeadLetter,
        ErrorHandlingStrategy::DeadLetter,
        ErrorHandlingStrategy::Custom("custom_handler".to_string()),
    ];

    for strategy in strategies {
        let mut config = StreamErrorHandlingConfig::default();
        config.error_strategy = strategy.clone();
        
        let manager = StreamResilienceManager::new(config).await.unwrap();
        
        let message = MessageBuilder::new()
            .with_topic("test.strategy")
            .with_payload(json!({"strategy": format!("{:?}", strategy)}))
            .build();

        // 测试失败的操作
        let result = manager.execute_with_resilience(
            || async { Err::<String, StreamError>(StreamError::processor_error("test", "Test error")) },
            &message,
            None,
        ).await;

        // 所有策略都应该返回错误（因为操作失败了）
        assert!(result.is_err());
        
        let stats = manager.get_stats().await;
        assert_eq!(stats.total_processed, 1);
        assert_eq!(stats.successful, 0);
        
        // 根据策略检查死信队列状态
        match strategy {
            ErrorHandlingStrategy::RetryThenDeadLetter | ErrorHandlingStrategy::DeadLetter => {
                assert_eq!(stats.dead_lettered, 1);
            }
            _ => {
                // 其他策略可能不会发送到死信队列
            }
        }
    }
}

#[tokio::test]
async fn test_resilience_stats_tracking() {
    let config = StreamErrorHandlingConfig::default();
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.stats")
        .with_payload(json!({"test": "stats_tracking"}))
        .build();

    // 执行多个操作以测试统计追踪
    for i in 0..5 {
        if i < 3 {
            // 前3个成功
            let _ = manager.execute_with_resilience(
                || async { Ok::<String, StreamError>(format!("success_{}", i)) },
                &message,
                None,
            ).await;
        } else {
            // 后2个失败
            let _ = manager.execute_with_resilience(
                || async { Err::<String, StreamError>(StreamError::processor_error("test", "Test failure")) },
                &message,
                None,
            ).await;
        }
    }

    let stats = manager.get_stats().await;
    assert_eq!(stats.total_processed, 5);
    assert_eq!(stats.successful, 3);
    assert_eq!(stats.dead_lettered, 2); // 失败的进入死信队列
    assert!(stats.last_updated > chrono::Utc::now() - chrono::Duration::seconds(1));
}

#[tokio::test]
async fn test_reprocess_dead_letter_message() {
    let config = StreamErrorHandlingConfig::default();
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.reprocess")
        .with_payload(json!({"id": "reprocess_test"}))
        .build();

    // 创建一个失败的操作，将消息发送到死信队列
    let _ = manager.execute_with_resilience(
        || async { Err::<String, StreamError>(StreamError::processor_error("test", "Initial failure")) },
        &message,
        None,
    ).await;

    // 验证消息在死信队列中
    let dlq_messages = manager.get_dead_letter_messages().await;
    assert_eq!(dlq_messages.len(), 1);
    
    let message_id = &dlq_messages[0].original_message.id().to_string();

    // 尝试重新处理死信消息
    let reprocessed = manager.reprocess_dead_letter_message(message_id).await;
    
    // 注意：这里的实现可能返回错误，因为实际的重新处理逻辑需要更复杂的实现
    // 这个测试主要是验证API的可用性
    match reprocessed {
        Ok(Some(_)) => {
            // 重新处理成功
            println!("Message reprocessed successfully");
        }
        Ok(None) => {
            // 消息不存在或已处理
            println!("Message not found or already processed");
        }
        Err(_) => {
            // 重新处理失败，这在测试环境中是可以接受的
            println!("Reprocessing failed, which is expected in test environment");
        }
    }
}

#[tokio::test]
async fn test_cleanup_expired_messages() {
    let mut config = StreamErrorHandlingConfig::default();
    config.dead_letter.retention_duration = Duration::from_millis(100); // 很短的保留时间
    
    let manager = StreamResilienceManager::new(config).await.unwrap();

    let message = MessageBuilder::new()
        .with_topic("test.cleanup")
        .with_payload(json!({"test": "cleanup"}))
        .build();

    // 创建一些死信消息
    for i in 0..3 {
        let test_msg = MessageBuilder::new()
            .with_topic("test.cleanup")
            .with_payload(json!({"id": i}))
            .build();
            
        let _ = manager.execute_with_resilience(
            || async { Err::<String, StreamError>(StreamError::processor_error("test", "Test error")) },
            &test_msg,
            None,
        ).await;
    }

    // 验证消息在死信队列中
    let dlq_messages = manager.get_dead_letter_messages().await;
    assert_eq!(dlq_messages.len(), 3);

    // 等待消息过期
    sleep(Duration::from_millis(150)).await;

    // 清理过期消息
    let cleaned_count = manager.cleanup_expired_messages().await.unwrap();
    
    // 在真实实现中，这些消息应该被清理
    // 在测试环境中，结果可能因具体实现而异
    println!("Cleaned {} expired messages", cleaned_count);
}

#[tokio::test]
async fn test_concurrent_resilience_operations() {
    let config = StreamErrorHandlingConfig::default();
    let manager = Arc::new(StreamResilienceManager::new(config).await.unwrap());

    let message = MessageBuilder::new()
        .with_topic("test.concurrent")
        .with_payload(json!({"test": "concurrency"}))
        .build();

    // 并发执行多个操作
    let tasks: Vec<_> = (0..10).map(|i| {
        let manager_clone = manager.clone();
        let message_clone = message.clone();
        tokio::spawn(async move {
            if i % 2 == 0 {
                // 偶数索引成功
                manager_clone.execute_with_resilience(
                    move || async { Ok::<String, StreamError>(format!("success_{}", i)) },
                    &message_clone,
                    None,
                ).await
            } else {
                // 奇数索引失败
                manager_clone.execute_with_resilience(
                    move || async { Err::<String, StreamError>(StreamError::processor_error("test", &format!("failure_{}", i))) },
                    &message_clone,
                    None,
                ).await
            }
        })
    }).collect();

    // 等待所有任务完成
    let results = futures::future::join_all(tasks).await;
    
    let success_count = results.iter().filter(|r| r.as_ref().unwrap().is_ok()).count();
    let failure_count = results.iter().filter(|r| r.as_ref().unwrap().is_err()).count();
    
    assert_eq!(success_count, 5);
    assert_eq!(failure_count, 5);

    let stats = manager.get_stats().await;
    assert_eq!(stats.total_processed, 10);
    assert_eq!(stats.successful, 5);
    assert_eq!(stats.dead_lettered, 5);
}