//! # Stream-processing fault-tolerance module
//!
//! Integrates rustcloud-resilience features to provide stream processing with:
//! - dead-letter queue handling
//! - smart retry
//! - error-handling strategies
//! - timeout control

use async_trait::async_trait;
use rustcloud_resilience::prelude::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use chrono::{DateTime, Utc};

use crate::error::{StreamError, StreamResult};
use crate::pipeline::{Message, ProcessingContext};

/// Retry configuration for stream processing.
///
/// Controls how many times a failed operation is attempted, the backoff
/// between attempts, which error codes are retryable, and the overall
/// timeout.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamRetryConfig {
    /// Maximum number of attempts.
    /// NOTE(review): presumably includes the initial call — confirm against
    /// `RetryConfig::max_attempts` semantics in rustcloud-resilience.
    pub max_attempts: u32,
    /// Backoff strategy between attempts (rustcloud-resilience type).
    pub backoff: BackoffStrategy,
    /// Error codes eligible for retry; matched against
    /// `StreamError::error_code()` by `DefaultStreamRetryable::should_retry`.
    pub retry_conditions: Vec<String>,
    /// Overall timeout applied to the retried operation.
    pub timeout: Duration,
}

impl Default for StreamRetryConfig {
    fn default() -> Self {
        Self {
            max_attempts: 3,
            backoff: BackoffStrategy::Linear { 
                initial_delay: Duration::from_millis(100),
                increment: Duration::from_millis(100),
            },
            retry_conditions: vec![
                "TIMEOUT_ERROR".to_string(),
                "NETWORK_ERROR".to_string(),
                "RESOURCE_EXHAUSTED".to_string(),
            ],
            timeout: Duration::from_secs(10),
        }
    }
}

/// Dead-letter-queue configuration for stream processing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamDeadLetterConfig {
    /// Whether the dead-letter queue is enabled.
    pub enabled: bool,
    /// Topic name for dead-lettered messages.
    /// NOTE(review): not read anywhere in this file — confirm a publisher
    /// elsewhere consumes it.
    pub topic: String,
    /// Number of retries after which a message goes to the DLQ.
    /// NOTE(review): not enforced in this file — presumably checked by the
    /// caller; verify.
    pub max_retries_before_dlq: u32,
    /// How long dead-letter messages are retained before cleanup.
    pub retention_duration: Duration,
    /// Maximum number of messages the queue holds.
    pub capacity: usize,
}

impl Default for StreamDeadLetterConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            topic: "stream.dead-letter".to_string(),
            max_retries_before_dlq: 3,
            retention_duration: Duration::from_secs(24 * 3600), // 24小时
            capacity: 10000,
        }
    }
}

/// Top-level error-handling configuration for stream processing, combining
/// retry, dead-letter, strategy, and circuit-breaker settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StreamErrorHandlingConfig {
    /// Retry configuration.
    pub retry: StreamRetryConfig,
    /// Dead-letter queue configuration.
    pub dead_letter: StreamDeadLetterConfig,
    /// Strategy applied when an operation terminally fails.
    pub error_strategy: ErrorHandlingStrategy,
    /// Optional circuit-breaker configuration (rustcloud-resilience type).
    /// NOTE(review): not consumed anywhere in this file — confirm it is
    /// wired up elsewhere.
    pub circuit_breaker: Option<CircuitBreakerConfig>,
}

impl Default for StreamErrorHandlingConfig {
    fn default() -> Self {
        Self {
            retry: StreamRetryConfig::default(),
            dead_letter: StreamDeadLetterConfig::default(),
            error_strategy: ErrorHandlingStrategy::RetryThenDeadLetter,
            circuit_breaker: Some(CircuitBreakerConfig::default()),
        }
    }
}

/// Strategy applied when a stream operation has terminally failed.
///
/// NOTE(review): as implemented in `handle_final_error`, every variant
/// still returns the error to the caller; variants differ only in logging
/// and dead-letter behavior.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ErrorHandlingStrategy {
    /// Log a warning and move on (the error is still surfaced).
    Ignore,
    /// Fail immediately.
    Fail,
    /// Retry, then fail.
    Retry,
    /// Retry, then route the message to the dead-letter queue.
    RetryThenDeadLetter,
    /// Route to the dead-letter queue immediately.
    DeadLetter,
    /// Custom handling identified by name (currently only logged).
    Custom(String),
}

/// A message that failed processing and was routed to the dead-letter
/// queue, together with diagnostic metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeadLetterMessage {
    /// The original, unmodified message.
    pub original_message: Message,
    /// Human-readable error description (`StreamError::to_string`).
    pub error: String,
    /// Machine-readable error code (`StreamError::error_code`).
    pub error_code: String,
    /// Number of retries performed before dead-lettering.
    pub retry_count: u32,
    /// Timestamp of the last processing attempt.
    pub last_attempt: DateTime<Utc>,
    /// Timestamp when the message entered the dead-letter queue.
    pub dead_letter_time: DateTime<Utc>,
    /// Processing context at failure time, if available.
    pub context: Option<ProcessingContext>,
}

impl DeadLetterMessage {
    /// 创建死信消息
    pub fn new(
        message: Message,
        error: &StreamError,
        retry_count: u32,
        context: Option<ProcessingContext>,
    ) -> Self {
        let now = Utc::now();
        Self {
            original_message: message,
            error: error.to_string(),
            error_code: error.error_code().to_string(),
            retry_count,
            last_attempt: now,
            dead_letter_time: now,
            context,
        }
    }
}

/// Coordinates retry, timeout, dead-lettering, and error handling for
/// stream-processing operations.
pub struct StreamResilienceManager {
    /// Executes operations under the configured retry policy.
    retry_manager: Arc<RetryExecutor>,
    /// Queue receiving messages that ultimately failed.
    dead_letter_queue: Arc<dyn DeadLetterQueue<DeadLetterMessage>>,
    /// Composite error handler (logging + dead-letter forwarding).
    /// NOTE(review): constructed but never invoked in this file — confirm
    /// it is used elsewhere or remove.
    error_handler: Arc<dyn ErrorHandler>,
    /// Applies the overall timeout around retried operations.
    timeout_manager: Arc<TimeoutManager>,
    /// Full error-handling configuration.
    config: StreamErrorHandlingConfig,
    /// Runtime statistics behind an async RwLock.
    stats: Arc<RwLock<ResilienceStats>>,
}

/// Aggregate fault-tolerance counters for a `StreamResilienceManager`.
///
/// `Clone` is required because `StreamResilienceManager::get_stats`
/// returns a snapshot copy of the value held behind the `RwLock`
/// (the original derive list omitted it, which does not compile).
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ResilienceStats {
    /// Total number of messages processed (success + failure).
    pub total_processed: u64,
    /// Number of successfully processed messages.
    pub successful: u64,
    /// Number of failed operations recorded on the retry-exhausted path.
    pub retries: u64,
    /// Number of messages routed to the dead-letter queue.
    pub dead_lettered: u64,
    /// Number of operations that hit the overall timeout.
    pub timeouts: u64,
    /// Timestamp of the most recent counter update.
    /// NOTE(review): `Default` for `DateTime<Utc>` yields the Unix epoch —
    /// confirm that is acceptable before any stats are recorded.
    pub last_updated: DateTime<Utc>,
}

impl StreamResilienceManager {
    /// Creates a resilience manager wired from the given configuration.
    ///
    /// Builds, in order: a retry executor, an in-memory dead-letter queue,
    /// a composite error handler (logging, then dead-letter forwarding),
    /// and a timeout manager.
    ///
    /// # Errors
    /// Returns an initialization error when the dead-letter queue cannot
    /// be constructed.
    pub async fn new(config: StreamErrorHandlingConfig) -> StreamResult<Self> {
        // Map the stream-level retry settings onto the resilience crate's
        // RetryConfig.
        let retry_config = RetryConfig {
            max_attempts: config.retry.max_attempts,
            backoff_strategy: config.retry.backoff.clone(),
            retry_conditions: vec![], // simplified: no per-code filtering at this layer
            budget_percent: 10.0,
            jitter_enabled: true,
            timeout: Some(config.retry.timeout),
        };

        let retry_manager = Arc::new(RetryExecutor::new(retry_config));

        // Build the dead-letter queue from its dedicated configuration.
        let dlq_config = DeadLetterConfig {
            enabled: config.dead_letter.enabled,
            max_size: config.dead_letter.capacity,
            retention_duration: config.dead_letter.retention_duration,
        };
        
        let dead_letter_queue = Arc::new(
            InMemoryDeadLetterQueue::new(dlq_config)
                .map_err(|e| StreamError::initialization_error(format!("死信队列初始化失败: {}", e)))?
        );

        // Error-handler chain: log the error first, then forward to the DLQ.
        let error_handler = Arc::new(
            CompositeErrorHandler::new()
                .add_handler(Arc::new(LoggingErrorHandler))
                .add_handler(Arc::new(DeadLetterErrorHandler::new(dead_letter_queue.clone())))
        );

        // Bounds each resilient execution with the configured timeout.
        let timeout_manager = Arc::new(TimeoutManager::new());

        Ok(Self {
            retry_manager,
            dead_letter_queue,
            error_handler,
            timeout_manager,
            config,
            stats: Arc::new(RwLock::new(ResilienceStats::default())),
        })
    }

    /// Runs `operation` under timeout + retry, applying the configured
    /// error strategy (dead-lettering, etc.) when it terminally fails.
    ///
    /// The outer `Result` from the timeout wrapper distinguishes timeout
    /// (`Err`) from completion; the inner one carries the retry outcome.
    pub async fn execute_with_resilience<F, T, Fut>(
        &self,
        operation: F,
        message: &Message,
        context: Option<ProcessingContext>,
    ) -> StreamResult<T>
    where
        F: Fn() -> Fut + Send + Sync,
        Fut: std::future::Future<Output = Result<T, StreamError>> + Send,
    {
        // Count the message up-front; the lock is explicitly released
        // before the (potentially long-running) operation executes.
        let mut stats = self.stats.write().await;
        stats.total_processed += 1;
        stats.last_updated = Utc::now();
        drop(stats);

        // Retry the operation, with the entire retry loop bounded by the
        // configured overall timeout.
        let result = self.timeout_manager
            .with_timeout(self.config.retry.timeout, async {
                self.retry_manager.execute(|| async {
                    operation().await.map_err(|e| {
                        // NOTE(review): this maps into `crate::ResilienceError`
                        // (this crate's root) rather than a
                        // rustcloud_resilience type — confirm intentional.
                        crate::ResilienceError::operation_failed(e.to_string())
                    })
                }).await
            })
            .await;

        match result {
            Ok(Ok(value)) => {
                // Success path: record and return the value.
                let mut stats = self.stats.write().await;
                stats.successful += 1;
                Ok(value)
            }
            Ok(Err(e)) => {
                // Retries exhausted (or a non-retryable failure).
                // NOTE(review): `retries` is incremented once per failed
                // operation, not once per attempt — confirm the intended
                // meaning of this counter.
                let mut stats = self.stats.write().await;
                stats.retries += 1;
                drop(stats);

                let stream_error = StreamError::internal_error(e.to_string());
                self.handle_final_error(message, &stream_error, 1, context).await
            }
            Err(_) => {
                // The overall timeout elapsed before the retries finished.
                let mut stats = self.stats.write().await;
                stats.timeouts += 1;
                drop(stats);

                let timeout_error = StreamError::timeout_error("操作执行", self.config.retry.timeout.as_millis() as u64);
                self.handle_final_error(message, &timeout_error, 0, context).await
            }
        }
    }

    /// Applies the configured error-handling strategy to a terminal
    /// failure. All strategies currently return `Err`; the dead-letter
    /// strategies additionally enqueue the message first.
    ///
    /// NOTE(review): `Ignore` also propagates the error — "continue
    /// processing" is presumably implemented by the caller; confirm.
    async fn handle_final_error<T>(
        &self,
        message: &Message,
        error: &StreamError,
        retry_count: u32,
        context: Option<ProcessingContext>,
    ) -> StreamResult<T> {
        match self.config.error_strategy {
            ErrorHandlingStrategy::Ignore => {
                tracing::warn!("忽略错误: {}", error);
                Err(error.clone())
            }
            ErrorHandlingStrategy::Fail => {
                Err(error.clone())
            }
            ErrorHandlingStrategy::Retry => {
                // Retrying already happened upstream; just surface the error.
                Err(error.clone())
            }
            ErrorHandlingStrategy::RetryThenDeadLetter | ErrorHandlingStrategy::DeadLetter => {
                // Enqueue to the DLQ, then still report the failure.
                self.send_to_dead_letter(message, error, retry_count, context).await?;
                Err(error.clone())
            }
            ErrorHandlingStrategy::Custom(ref strategy) => {
                tracing::warn!("自定义错误处理策略: {}, 错误: {}", strategy, error);
                Err(error.clone())
            }
        }
    }

    /// Enqueues the failed message into the dead-letter queue (no-op when
    /// the DLQ is disabled) and bumps the `dead_lettered` counter.
    async fn send_to_dead_letter(
        &self,
        message: &Message,
        error: &StreamError,
        retry_count: u32,
        context: Option<ProcessingContext>,
    ) -> StreamResult<()> {
        if !self.config.dead_letter.enabled {
            return Ok(());
        }

        let dead_letter_msg = DeadLetterMessage::new(
            message.clone(),
            error,
            retry_count,
            context,
        );

        self.dead_letter_queue.send(dead_letter_msg).await
            .map_err(|e| StreamError::internal_error(format!("发送死信消息失败: {}", e)))?;

        let mut stats = self.stats.write().await;
        stats.dead_lettered += 1;

        tracing::warn!(
            "消息已发送到死信队列: message_id={}, error={}, retry_count={}",
            message.id(),
            error,
            retry_count
        );

        Ok(())
    }

    /// Returns a snapshot of the current resilience statistics.
    pub async fn get_stats(&self) -> ResilienceStats {
        self.stats.read().await.clone()
    }

    /// Lists the messages currently held in the dead-letter queue
    /// (empty on queue errors — failures are silently swallowed here).
    pub async fn get_dead_letter_messages(&self) -> Vec<DeadLetterMessage> {
        self.dead_letter_queue.list().await.unwrap_or_default()
    }

    /// Requeues a dead-letter message for reprocessing by id, returning
    /// the message if it was found.
    pub async fn reprocess_dead_letter_message(&self, message_id: &str) -> StreamResult<Option<DeadLetterMessage>> {
        self.dead_letter_queue.requeue(message_id).await
            .map_err(|e| StreamError::internal_error(format!("重新处理死信消息失败: {}", e)))
    }

    /// Removes expired dead-letter messages and returns how many were
    /// deleted.
    pub async fn cleanup_expired_messages(&self) -> StreamResult<u64> {
        let count = self.dead_letter_queue.cleanup_expired().await
            .map_err(|e| StreamError::internal_error(format!("清理过期消息失败: {}", e)))?;
        
        tracing::info!("清理了 {} 条过期的死信消息", count);
        Ok(count)
    }
}

/// Retry-decision trait for stream processing.
///
/// NOTE(review): no method here is async, so `#[async_trait]` appears
/// unnecessary — confirm before removing, since implementors may rely on
/// the macro-expanded form.
#[async_trait]
pub trait StreamRetryable {
    /// Returns `true` if the given error should be retried.
    fn should_retry(&self, error: &StreamError) -> bool;
    
    /// Returns the backoff delay before the given retry attempt.
    /// NOTE(review): whether `attempt` is 0- or 1-based is not fixed by
    /// this file — verify against callers.
    fn get_retry_delay(&self, attempt: u32) -> Duration;
}

/// Default [`StreamRetryable`] implementation driven by a
/// [`StreamRetryConfig`].
pub struct DefaultStreamRetryable {
    // Retry settings consulted by `should_retry` / `get_retry_delay`.
    config: StreamRetryConfig,
}

impl DefaultStreamRetryable {
    pub fn new(config: StreamRetryConfig) -> Self {
        Self { config }
    }
}

#[async_trait]
impl StreamRetryable for DefaultStreamRetryable {
    /// An error is retried only if it is intrinsically retryable AND its
    /// error code appears in the configured retry conditions.
    fn should_retry(&self, error: &StreamError) -> bool {
        if !error.is_retryable() {
            return false;
        }

        // Compare against configured codes without allocating a String
        // per check (original used `contains(&code.to_string())`).
        let error_code = error.error_code();
        self.config.retry_conditions.iter().any(|c| c.as_str() == error_code)
    }

    /// Computes the backoff delay for the given attempt according to the
    /// configured strategy; `Custom` strategies fall back to a fixed 1s.
    fn get_retry_delay(&self, attempt: u32) -> Duration {
        match &self.config.backoff {
            BackoffStrategy::Fixed { delay } => *delay,
            BackoffStrategy::Linear { initial_delay, increment } => {
                // attempt 0 => initial_delay; grows by `increment` per attempt.
                *initial_delay + *increment * attempt
            }
            BackoffStrategy::Exponential { initial_delay, max_delay, multiplier } => {
                // initial_delay * multiplier^attempt, capped at max_delay.
                // NOTE(review): the f64 -> u64 `as` cast can wrap for extreme
                // multiplier/attempt combinations before the cap applies —
                // acceptable for sane configs, flagged for awareness.
                let delay = Duration::from_millis(
                    (initial_delay.as_millis() as f64 * multiplier.powi(attempt as i32)) as u64
                );
                std::cmp::min(delay, *max_delay)
            }
            BackoffStrategy::Custom { .. } => {
                // No custom hook is exposed here; use a fixed 1s fallback.
                Duration::from_millis(1000)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::pipeline::MessageBuilder;

    /// Happy path: a successful operation passes through unchanged and
    /// bumps the processed/successful counters.
    #[tokio::test]
    async fn test_stream_resilience_manager() {
        let config = StreamErrorHandlingConfig::default();
        let manager = StreamResilienceManager::new(config).await.unwrap();

        let message = MessageBuilder::new()
            .topic("test")
            .payload(serde_json::json!({"key": "value"}))
            .build().unwrap();

        // A successful operation should return its value untouched.
        let result = manager.execute_with_resilience(
            || async { Ok::<String, StreamError>("success".to_string()) },
            &message,
            None,
        ).await;

        assert!(result.is_ok());
        assert_eq!(result.unwrap(), "success");

        let stats = manager.get_stats().await;
        assert_eq!(stats.total_processed, 1);
        assert_eq!(stats.successful, 1);
    }

    /// Retry decisions: timeout errors are retryable under the default
    /// conditions; config errors are not.
    #[tokio::test]
    async fn test_stream_retryable() {
        let config = StreamRetryConfig::default();
        let retryable = DefaultStreamRetryable::new(config);

        let timeout_error = StreamError::timeout_error("test", 1000);
        assert!(retryable.should_retry(&timeout_error));

        let config_error = StreamError::config_error("test");
        assert!(!retryable.should_retry(&config_error));
    }

    /// Dead-letter construction captures the error code and retry count.
    #[tokio::test]
    async fn test_dead_letter_message() {
        let message = MessageBuilder::new()
            .topic("test")
            .payload(serde_json::json!({"key": "value"}))
            .build().unwrap();

        let error = StreamError::processor_error("test_processor", "test error");
        let dl_msg = DeadLetterMessage::new(message, &error, 3, None);

        assert_eq!(dl_msg.error_code, "PROCESSOR_ERROR");
        assert_eq!(dl_msg.retry_count, 3);
    }
}
}