//! Supervision strategies for actor restart policies
//! 
//! This module provides various supervision strategies that can be used to define
//! how actors should be restarted after failures.
//! 
//! Supervision strategies can be applied to actors to control their behavior when they
//! encounter failures or when linked actors die. These strategies include exponential 
//! backoff restart, rate-limited restart, and other configurable restart policies.

use std::{collections::VecDeque, fmt, sync::Arc, time::{Duration, Instant}};

use tokio::sync::Semaphore;

use crate::{
    actor::{Actor, ActorID, ActorRef, WeakActorRef, actor_ref::Links},
    error::{ActorStopReason, BoxError, PanicError},
    mailbox::bounded::BoundedMailbox,
};

/// Trait for defining actor supervision strategies
///
/// Methods take `&mut self`, so implementations may keep internal state
/// between decisions (e.g. a retry counter). For that state to matter, one
/// strategy instance must be retained per supervised actor across failures.
pub trait SupervisionStrategy: Send + 'static {
    /// Determine whether to restart an actor after a panic
    /// 
    /// # Parameters
    /// - `err`: The panic error that occurred
    /// - `context`: Additional context for making the decision
    /// 
    /// # Returns
    /// - A supervision decision indicating what action to take
    fn handle_panic(
        &mut self, 
        err: &PanicError,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision;

    /// Determine whether to restart an actor after a linked actor died
    /// 
    /// # Parameters
    /// - `id`: The ID of the linked actor that died
    /// - `reason`: The reason why the linked actor stopped
    /// - `context`: Additional context for making the decision
    /// 
    /// # Returns
    /// - A supervision decision indicating what action to take
    fn handle_link_died(
        &mut self,
        id: &ActorID,
        reason: &ActorStopReason,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision;
}

/// Context information for supervision decisions
///
/// Holds a bounded, time-stamped failure history that strategies can query,
/// e.g. to rate-limit restarts within a sliding time window.
pub struct SupervisionContext {
    /// History of failures with timestamps, oldest entries at the front
    pub failure_history: VecDeque<(Instant, FailureType)>,
    /// Maximum number of failures to keep in history
    pub max_history_size: usize,
}

impl SupervisionContext {
    /// Create a new supervision context.
    ///
    /// `max_history_size` bounds how many `(timestamp, failure)` entries are
    /// retained; the oldest entries are evicted first. A size of `0` disables
    /// history recording entirely.
    pub fn new(max_history_size: usize) -> Self {
        Self {
            failure_history: VecDeque::with_capacity(max_history_size),
            max_history_size,
        }
    }

    /// Record a new failure, evicting the oldest entries if the history is full.
    pub fn record_failure(&mut self, failure_type: FailureType) {
        // Bug fix: with `max_history_size == 0` the previous code popped from
        // an empty deque (a no-op) and then pushed, growing the history to 1
        // and violating the size bound. Skip recording entirely in that case.
        if self.max_history_size == 0 {
            return;
        }
        // `while` (not `if`) keeps the invariant even if `max_history_size`
        // (a public field) was lowered after entries were already recorded.
        while self.failure_history.len() >= self.max_history_size {
            self.failure_history.pop_front();
        }
        self.failure_history.push_back((Instant::now(), failure_type));
    }

    /// Get the number of recorded failures that occurred within `window`
    /// of the current instant.
    pub fn failures_in_window(&self, window: Duration) -> usize {
        let now = Instant::now();
        self.failure_history
            .iter()
            .filter(|(time, _)| now.duration_since(*time) <= window)
            .count()
    }
}

/// Type of failure that occurred
///
/// Fieldless, so it is cheap to copy and compare; the added derives make it
/// usable as a plain value in history entries, assertions, and map keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FailureType {
    /// The actor panicked
    Panic,
    /// A linked actor died
    LinkedActorDied,
}

/// Decision about how to handle an actor after a failure
///
/// Returned by [`SupervisionStrategy`] methods; the caller is responsible
/// for carrying out the chosen action.
#[derive(Debug)]
pub enum SupervisionDecision {
    /// Stop the actor with the specified reason
    Stop(ActorStopReason),
    /// Continue the actor's operation without restarting
    Continue,
    /// Restart the actor after waiting for the given delay
    RestartAfter(Duration),
}

/// Default strategy that follows Kameo's default behavior: stop on panic,
/// stop when a linked actor dies abnormally, and continue when a linked
/// actor exits normally. Stateless, so a single instance can be reused.
pub struct DefaultStrategy;

impl SupervisionStrategy for DefaultStrategy {
    /// Record the panic in the failure history, then stop the actor with the
    /// panic as the stop reason.
    fn handle_panic(
        &mut self,
        err: &PanicError,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision {
        context.record_failure(FailureType::Panic);
        let stop_reason = ActorStopReason::Panicked(err.clone());
        SupervisionDecision::Stop(stop_reason)
    }

    /// Ignore normal exits of linked actors; for any abnormal exit, stop and
    /// wrap the cause in `ActorStopReason::LinkDied`.
    fn handle_link_died(
        &mut self,
        id: &ActorID,
        reason: &ActorStopReason,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision {
        context.record_failure(FailureType::LinkedActorDied);
        if matches!(reason, ActorStopReason::Normal) {
            SupervisionDecision::Continue
        } else {
            let stop_reason = ActorStopReason::LinkDied {
                id: *id,
                reason: Box::new(reason.clone()),
            };
            SupervisionDecision::Stop(stop_reason)
        }
    }
}

/// Strategy that implements exponential backoff for restarts
///
/// The delay for the n-th restart (0-based) is `base_duration * 2^n`, capped
/// at `max_duration`. After `max_retries` restarts the actor is stopped.
pub struct ExponentialBackoffStrategy {
    /// Maximum number of restart attempts
    pub max_retries: usize,
    /// Base duration for the exponential backoff
    pub base_duration: Duration,
    /// Maximum duration between restart attempts
    pub max_duration: Duration,
    /// Current retry count; incremented each time a backoff is computed and
    /// never reset within this type (see `next_backoff_duration`)
    retry_count: usize,
}

impl ExponentialBackoffStrategy {
    /// Create a new exponential backoff strategy.
    ///
    /// # Parameters
    /// - `max_retries`: restart attempts allowed before giving up
    /// - `base_duration`: delay of the first restart
    /// - `max_duration`: upper bound on any computed delay
    pub fn new(max_retries: usize, base_duration: Duration, max_duration: Duration) -> Self {
        Self {
            max_retries,
            base_duration,
            max_duration,
            retry_count: 0,
        }
    }

    /// Calculate the next backoff duration and advance the retry counter.
    ///
    /// Returns `base_duration * 2^retry_count`, capped at `max_duration`.
    /// NOTE(review): `retry_count` is never reset on success, so a long-lived
    /// actor eventually exhausts its retries — confirm this is intended.
    fn next_backoff_duration(&mut self) -> Duration {
        if self.retry_count >= self.max_retries {
            return self.max_duration;
        }

        // Exponential backoff factor: 2^retry_count, with the shift amount
        // clamped so it cannot overflow the u32.
        let factor = 1u32 << self.retry_count.min(31);
        self.retry_count += 1;

        // Bug fix: the previous `mul_f64(factor as f64)` panics if the
        // product overflows `Duration`. `checked_mul` saturates to
        // `max_duration` instead, and avoids the float round-trip (which was
        // exact only because the factor is a power of two).
        self.base_duration
            .checked_mul(factor)
            .unwrap_or(self.max_duration)
            .min(self.max_duration)
    }
}

impl SupervisionStrategy for ExponentialBackoffStrategy {
    /// Schedule a restart with an exponentially growing delay; once the retry
    /// budget is spent, stop the actor with the panic as the reason.
    fn handle_panic(
        &mut self,
        err: &PanicError,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision {
        context.record_failure(FailureType::Panic);

        if self.retry_count < self.max_retries {
            SupervisionDecision::RestartAfter(self.next_backoff_duration())
        } else {
            SupervisionDecision::Stop(ActorStopReason::Panicked(err.clone()))
        }
    }

    /// Apply the same backoff policy to linked-actor deaths. Normal exits are
    /// recorded in the history but continue immediately without consuming a
    /// retry.
    fn handle_link_died(
        &mut self,
        id: &ActorID,
        reason: &ActorStopReason,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision {
        context.record_failure(FailureType::LinkedActorDied);

        if matches!(reason, ActorStopReason::Normal) {
            return SupervisionDecision::Continue;
        }

        if self.retry_count < self.max_retries {
            SupervisionDecision::RestartAfter(self.next_backoff_duration())
        } else {
            SupervisionDecision::Stop(ActorStopReason::LinkDied {
                id: *id,
                reason: Box::new(reason.clone()),
            })
        }
    }
}

/// Strategy that limits the rate of restarts
///
/// Restarts are permitted while no more than `max_restarts` failures fall
/// inside the sliding `window`; beyond that, decisions are delegated to the
/// configured fallback strategy.
pub struct RateLimitedStrategy {
    /// Maximum number of restarts allowed in the time window
    pub max_restarts: usize,
    /// Time window for counting restarts
    pub window: Duration,
    /// Fallback strategy to use when rate limit is exceeded
    fallback: Box<dyn SupervisionStrategy>,
}

impl RateLimitedStrategy {
    /// Create a new rate-limited restart strategy.
    ///
    /// Up to `max_restarts` failures within the sliding `window` lead to a
    /// restart; any failure beyond that is handed to `fallback`.
    pub fn new(
        max_restarts: usize,
        window: Duration,
        fallback: impl SupervisionStrategy + 'static,
    ) -> Self {
        let fallback: Box<dyn SupervisionStrategy> = Box::new(fallback);
        Self {
            max_restarts,
            window,
            fallback,
        }
    }
}

impl SupervisionStrategy for RateLimitedStrategy {
    /// Allow a restart (fixed 100 ms delay) while the failure count inside
    /// the window stays at or below `max_restarts`; otherwise delegate the
    /// decision to the fallback strategy.
    fn handle_panic(
        &mut self,
        err: &PanicError,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision {
        context.record_failure(FailureType::Panic);

        // The failure just recorded is included in the count, so the limit
        // trips on the (max_restarts + 1)-th failure inside the window.
        // NOTE(review): the fallback may record a second history entry for
        // the same failure — confirm the double count is acceptable.
        if context.failures_in_window(self.window) <= self.max_restarts {
            SupervisionDecision::RestartAfter(Duration::from_millis(100))
        } else {
            self.fallback.handle_panic(err, context)
        }
    }

    /// Linked-actor deaths follow the same rate limit, except normal exits
    /// continue immediately (though they are still recorded in the history).
    fn handle_link_died(
        &mut self,
        id: &ActorID,
        reason: &ActorStopReason,
        context: &mut SupervisionContext,
    ) -> SupervisionDecision {
        context.record_failure(FailureType::LinkedActorDied);

        if matches!(reason, ActorStopReason::Normal) {
            return SupervisionDecision::Continue;
        }

        if context.failures_in_window(self.window) <= self.max_restarts {
            SupervisionDecision::RestartAfter(Duration::from_millis(100))
        } else {
            self.fallback.handle_link_died(id, reason, context)
        }
    }
}

/// Trait for actors that use supervision strategies
///
/// NOTE(review): the default methods below obtain the strategy via
/// [`SupervisedActor::supervision_strategy`], which returns a *fresh* boxed
/// strategy on every call. Any state the strategy mutates while deciding
/// (e.g. the retry counter in `ExponentialBackoffStrategy`) lives only in
/// that temporary box and is dropped when the method returns, so stateful
/// strategies cannot accumulate state across failures. Confirm whether the
/// strategy should instead be stored on the actor and borrowed mutably.
pub trait SupervisedActor: Actor {
    /// Get the supervision strategy for this actor
    ///
    /// Called once per failure; see the trait-level note about statefulness.
    fn supervision_strategy(&self) -> Box<dyn SupervisionStrategy>;
    
    /// Get the supervision context for this actor
    ///
    /// The context records the failure history that rate-limiting strategies
    /// inspect.
    fn supervision_context(&mut self) -> &mut SupervisionContext;
    
    /// Handle actor panic with supervision strategy
    ///
    /// Returns `Ok(Some(reason))` to stop the actor with that reason, or
    /// `Ok(None)` to keep it running. For `RestartAfter(delay)` this merely
    /// sleeps for `delay` and then returns `Ok(None)`; no re-initialization
    /// happens here — presumably the caller performs the actual restart.
    /// TODO confirm. `actor_ref` is currently unused.
    async fn handle_supervised_panic(
        &mut self,
        actor_ref: WeakActorRef<Self>,
        err: PanicError,
    ) -> Result<Option<ActorStopReason>, BoxError> {
        let decision = self.supervision_strategy().handle_panic(
            &err,
            self.supervision_context(),
        );
        
        match decision {
            SupervisionDecision::Stop(reason) => Ok(Some(reason)),
            SupervisionDecision::Continue => Ok(None),
            SupervisionDecision::RestartAfter(delay) => {
                // Non-blocking delay before resuming; see the doc comment.
                tokio::time::sleep(delay).await;
                Ok(None)
            }
        }
    }
    
    /// Handle linked actor death with supervision strategy
    ///
    /// Same decision mapping as [`SupervisedActor::handle_supervised_panic`]:
    /// `Stop` becomes `Ok(Some(reason))`; `Continue` and `RestartAfter`
    /// (after sleeping) become `Ok(None)`. `actor_ref` is currently unused.
    async fn handle_supervised_link_died(
        &mut self,
        actor_ref: WeakActorRef<Self>,
        id: ActorID,
        reason: ActorStopReason,
    ) -> Result<Option<ActorStopReason>, BoxError> {
        let decision = self.supervision_strategy().handle_link_died(
            &id,
            &reason,
            self.supervision_context(),
        );
        
        match decision {
            SupervisionDecision::Stop(reason) => Ok(Some(reason)),
            SupervisionDecision::Continue => Ok(None),
            SupervisionDecision::RestartAfter(delay) => {
                tokio::time::sleep(delay).await;
                Ok(None)
            }
        }
    }
}

/// Builder for creating supervision strategies
///
/// Starts from [`DefaultStrategy`]; `with_*` methods configure the strategy
/// and [`SupervisionStrategyBuilder::build`] yields the boxed result.
pub struct SupervisionStrategyBuilder {
    // The strategy produced by `build()`.
    strategy: Box<dyn SupervisionStrategy>,
}

impl SupervisionStrategyBuilder {
    /// Create a new strategy builder with the default strategy
    pub fn new() -> Self {
        Self {
            strategy: Box::new(DefaultStrategy),
        }
    }
    
    /// Add exponential backoff restart strategy
    pub fn with_exponential_backoff(
        self,
        max_retries: usize,
        base_duration: Duration,
        max_duration: Duration,
    ) -> Self {
        Self {
            strategy: Box::new(ExponentialBackoffStrategy::new(
                max_retries, 
                base_duration, 
                max_duration
            )),
        }
    }
    
    /// Add rate-limited restart strategy
    pub fn with_rate_limited_restarts(
        self,
        max_restarts: usize,
        window: Duration,
    ) -> Self {
        Self {
            strategy: Box::new(RateLimitedStrategy::new(
                max_restarts,
                window, 
                DefaultStrategy,
            )),
        }
    }
    
    /// Build the configured strategy
    pub fn build(self) -> Box<dyn SupervisionStrategy> {
        self.strategy
    }
}

impl Default for SupervisionStrategyBuilder {
    /// Equivalent to [`SupervisionStrategyBuilder::new`]: a builder seeded
    /// with `DefaultStrategy`.
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use super::*;
    use crate::error::PanicError;

    // Cleanup: removed the unused `futures` import (an external crate this
    // module never actually used), the unused `Links` import, and the unused
    // `TestActor` type together with its `BoundedMailbox` import — all of
    // them only produced warnings. The test functions are unchanged.

    #[test]
    fn test_default_strategy() {
        let mut strategy = DefaultStrategy;
        let mut context = SupervisionContext::new(10);

        // Test panic handling
        let err = PanicError::new("test panic");
        let decision = strategy.handle_panic(&err, &mut context);

        match decision {
            SupervisionDecision::Stop(reason) => {
                assert!(matches!(reason, ActorStopReason::Panicked(_)));
            }
            _ => panic!("Expected Stop decision"),
        }

        // Test link death - normal exit
        let id = ActorID::generate();
        let reason = ActorStopReason::Normal;
        let decision = strategy.handle_link_died(&id, &reason, &mut context);

        match decision {
            SupervisionDecision::Continue => {}
            _ => panic!("Expected Continue decision"),
        }

        // Test link death - abnormal exit
        let id = ActorID::generate();
        let reason = ActorStopReason::Killed;
        let decision = strategy.handle_link_died(&id, &reason, &mut context);

        match decision {
            SupervisionDecision::Stop(reason) => {
                assert!(matches!(reason, ActorStopReason::LinkDied { .. }));
            }
            _ => panic!("Expected Stop decision"),
        }
    }
    
    #[test]
    fn test_exponential_backoff_strategy() {
        let mut strategy = ExponentialBackoffStrategy::new(
            3, // max retries
            Duration::from_millis(100), // base duration
            Duration::from_secs(5), // max duration
        );
        let mut context = SupervisionContext::new(10);
        
        // First retry should restart with ~100ms delay
        let err = PanicError::new("test panic");
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::RestartAfter(delay) => {
                assert!(delay >= Duration::from_millis(100));
                assert!(delay <= Duration::from_millis(120)); // Some wiggle room
            },
            _ => panic!("Expected RestartAfter decision, got: {:?}", decision),
        }
        
        // Second retry should restart with ~200ms delay (2^1 * 100ms)
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::RestartAfter(delay) => {
                assert!(delay >= Duration::from_millis(200));
                assert!(delay <= Duration::from_millis(220)); // Some wiggle room
            },
            _ => panic!("Expected RestartAfter decision, got: {:?}", decision),
        }
        
        // Third retry should restart with ~400ms delay (2^2 * 100ms)
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::RestartAfter(delay) => {
                assert!(delay >= Duration::from_millis(400));
                assert!(delay <= Duration::from_millis(420)); // Some wiggle room
            },
            _ => panic!("Expected RestartAfter decision, got: {:?}", decision),
        }
        
        // Fourth retry should stop (exceeded max_retries)
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::Stop(_) => {}, // expected
            _ => panic!("Expected Stop decision, got: {:?}", decision),
        }
    }
    
    #[test]
    fn test_rate_limited_strategy() {
        let mut strategy = RateLimitedStrategy::new(
            2,
            Duration::from_millis(100),
            DefaultStrategy,
        );
        let mut context = SupervisionContext::new(10);
        
        // First failure should allow restart with a delay
        let err = PanicError::new("test panic");
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::RestartAfter(_) => {}
            _ => panic!("Expected RestartAfter decision"),
        }
        
        // Call handle_panic again - this should be the second failure
        // Note that handle_panic records a failure each time it's called
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::RestartAfter(_) => {}
            _ => panic!("Expected RestartAfter decision"),
        }
        
        // Call handle_panic again - this should be the third failure
        // At this point we should exceed the max_restarts (which is 2)
        // and the fallback strategy (DefaultStrategy) should be used
        let decision = strategy.handle_panic(&err, &mut context);
        
        match decision {
            SupervisionDecision::Stop(reason) => {
                assert!(matches!(reason, ActorStopReason::Panicked(_)));
            }
            _ => panic!("Expected Stop decision from fallback"),
        }
        
        // Test link death (with killed reason and no failures yet)
        let id = ActorID::generate();
        let reason = ActorStopReason::Killed;
        context = SupervisionContext::new(10); // Reset context
        
        let decision = strategy.handle_link_died(&id, &reason, &mut context);
        
        match decision {
            SupervisionDecision::RestartAfter(_) => {}
            _ => panic!("Expected RestartAfter decision"),
        }
    }
    
    #[test]
    fn test_supervision_context() {
        let mut context = SupervisionContext::new(5);
        
        // Add failures
        context.record_failure(FailureType::Panic);
        context.record_failure(FailureType::LinkedActorDied);
        context.record_failure(FailureType::Panic);
        
        // Check count in window
        assert_eq!(context.failures_in_window(Duration::from_secs(10)), 3);
        
        // Add more failures to test the max history size
        context.record_failure(FailureType::Panic);
        context.record_failure(FailureType::Panic);
        context.record_failure(FailureType::LinkedActorDied);
        
        // Should have dropped the first failure
        assert_eq!(context.failure_history.len(), 5);
        assert_eq!(context.failures_in_window(Duration::from_secs(10)), 5);
    }
    
    #[test]
    fn test_strategy_builder() {
        // Test default strategy
        let builder = SupervisionStrategyBuilder::new();
        let mut strategy = builder.build();
        
        // Since we can't easily check the concrete type, check the behavior instead
        let mut context = SupervisionContext::new(10);
        let err = PanicError::new("test panic");
        
        // DefaultStrategy should return Stop(Panicked) for panics
        let decision = strategy.as_mut().handle_panic(&err, &mut context);
        match decision {
            SupervisionDecision::Stop(reason) => {
                assert!(matches!(reason, ActorStopReason::Panicked(_)));
            }
            _ => panic!("Default strategy should return Stop for panics"),
        }
        
        // Test with exponential backoff
        let builder = SupervisionStrategyBuilder::new()
            .with_exponential_backoff(3, Duration::from_millis(100), Duration::from_secs(1));
        let mut strategy = builder.build();
        
        // ExponentialBackoffStrategy should return RestartAfter for panics
        let decision = strategy.as_mut().handle_panic(&err, &mut context);
        match decision {
            SupervisionDecision::RestartAfter(_) => {}
            _ => panic!("Exponential backoff strategy should return RestartAfter for panics"),
        }
        
        // Test with rate limited restarts
        let builder = SupervisionStrategyBuilder::new()
            .with_rate_limited_restarts(5, Duration::from_secs(1));
        let mut strategy = builder.build();
        
        // RateLimitedStrategy should return RestartAfter for panics
        let decision = strategy.as_mut().handle_panic(&err, &mut context);
        match decision {
            SupervisionDecision::RestartAfter(_) => {}
            _ => panic!("Rate limited strategy should return RestartAfter for panics"),
        }
    }
}