use crate::config::AppConfig;
use crate::errors::AppResult;
use anyhow::Result;
use lru::LruCache;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{Mutex, RwLock};

/// Performance monitoring and optimization utilities.
///
/// Owns the embedding/response LRU caches, the per-provider HTTP connection
/// pools, and the shared [`PerformanceMetrics`]. Every field is wrapped in
/// `Arc` + an async lock so the manager can be shared across tokio tasks.
pub struct PerformanceManager {
    /// LRU cache mapping embedding keys to their vectors.
    embedding_cache: Arc<Mutex<LruCache<String, Vec<f32>>>>,
    /// LRU cache of API responses, each with its own TTL.
    response_cache: Arc<Mutex<LruCache<String, CachedResponse>>>,
    /// Connection pools keyed by provider name.
    connection_pools: Arc<RwLock<HashMap<String, ConnectionPool>>>,
    /// Shared performance metrics (read-heavy, hence `RwLock`).
    metrics: Arc<RwLock<PerformanceMetrics>>,
    /// Application configuration (cache sizes, feature toggles, timeouts).
    config: AppConfig,
}

/// Cached API response with an expiry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedResponse {
    /// Response content
    pub content: String,
    /// UTC timestamp recorded when the entry was cached.
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Time to live in seconds; a value of 0 means the entry is
    /// immediately considered expired by `is_valid`.
    pub ttl_seconds: u64,
}

impl CachedResponse {
    /// Check whether the cached response is still within its TTL.
    ///
    /// Returns `true` while fewer than `ttl_seconds` whole seconds have
    /// elapsed since `timestamp`; a TTL of 0 therefore always reads as
    /// expired. TTLs too large to fit in an `i64` are treated as
    /// never-expiring instead of silently wrapping via an `as` cast.
    pub fn is_valid(&self) -> bool {
        let age = chrono::Utc::now().signed_duration_since(self.timestamp);
        match i64::try_from(self.ttl_seconds) {
            Ok(ttl) => age.num_seconds() < ttl,
            // TTL exceeds i64::MAX seconds: effectively never expires.
            Err(_) => true,
        }
    }
}

/// Round-robin pool of HTTP clients for one API provider.
#[derive(Debug)]
pub struct ConnectionPool {
    /// Pre-built connections; length is fixed at construction time.
    connections: Vec<PooledConnection>,
    /// Maximum pool size requested at construction.
    max_size: usize,
    /// Monotonically increasing cursor for round-robin selection
    /// (taken modulo the pool length in `get_connection`).
    current_index: std::sync::atomic::AtomicUsize,
}

impl Clone for ConnectionPool {
    /// Clone the pool, snapshotting the round-robin cursor.
    ///
    /// `AtomicUsize` is not `Clone`, so the current counter value is read
    /// once and wrapped in a fresh atomic for the copy.
    fn clone(&self) -> Self {
        use std::sync::atomic::{AtomicUsize, Ordering};

        let cursor = self.current_index.load(Ordering::SeqCst);
        Self {
            connections: self.connections.clone(),
            max_size: self.max_size,
            current_index: AtomicUsize::new(cursor),
        }
    }
}

/// Individual connection in the pool.
///
/// Cloning is cheap on the bookkeeping side: `last_used` and `healthy` are
/// `Arc`-shared, so clones observe the same state.
#[derive(Debug, Clone)]
pub struct PooledConnection {
    /// HTTP client used to issue requests.
    client: reqwest::Client,
    /// When this connection was last handed out by `get_connection`.
    last_used: Arc<Mutex<Instant>>,
    /// Whether the connection is currently considered healthy.
    healthy: Arc<Mutex<bool>>,
}

impl ConnectionPool {
    /// Create a new connection pool with `max_size` pre-built HTTP clients.
    ///
    /// `max_size` is clamped to at least 1 so that `get_connection` can
    /// never hit a modulo-by-zero panic on an empty pool. Each client is
    /// configured with the given request `timeout`; if the builder fails
    /// (e.g. TLS backend initialization) we fall back to a default client,
    /// which silently drops the timeout but keeps the pool usable.
    pub fn new(max_size: usize, timeout: Duration) -> Self {
        // Guard: `get_connection` computes `index % connections.len()`,
        // which panics when the pool is empty.
        let size = max_size.max(1);

        let connections = (0..size)
            .map(|_| {
                let client = reqwest::Client::builder()
                    .timeout(timeout)
                    .build()
                    // NOTE: the fallback client has NO timeout configured;
                    // preferred over panicking during startup.
                    .unwrap_or_default();

                PooledConnection {
                    client,
                    last_used: Arc::new(Mutex::new(Instant::now())),
                    healthy: Arc::new(Mutex::new(true)),
                }
            })
            .collect();

        Self {
            connections,
            max_size: size,
            current_index: std::sync::atomic::AtomicUsize::new(0),
        }
    }

    /// Get a connection from the pool using round-robin selection.
    pub fn get_connection(&self) -> &PooledConnection {
        // fetch_add wraps on overflow, which is fine for a round-robin
        // cursor; the modulo keeps the index in bounds.
        let index = self
            .current_index
            .fetch_add(1, std::sync::atomic::Ordering::SeqCst)
            % self.connections.len();

        let conn = &self.connections[index];

        // Best-effort timestamp update: if the lock is contended we skip
        // rather than block the caller.
        if let Ok(mut last_used) = conn.last_used.try_lock() {
            *last_used = Instant::now();
        }

        conn
    }

    /// Report whether at least one connection is currently marked healthy.
    ///
    /// Best-effort: connections whose health lock is contended are treated
    /// as unhealthy for this check.
    pub async fn check_health(&self) -> bool {
        let mut healthy_count = 0;

        for conn in &self.connections {
            if let Ok(healthy) = conn.healthy.try_lock() {
                if *healthy {
                    healthy_count += 1;
                }
            }
        }

        healthy_count > 0
    }

    /// Reset health flags on all connections.
    ///
    /// Placeholder behavior: rather than recreating unhealthy connections,
    /// every reachable flag is optimistically set back to healthy.
    pub async fn cleanup_unhealthy(&mut self) {
        for conn in &self.connections {
            if let Ok(mut healthy) = conn.healthy.try_lock() {
                *healthy = true;
            }
        }
    }
}

/// Performance metrics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Request counts by request type.
    pub request_counts: HashMap<String, u64>,
    /// Per-type response times in milliseconds.
    /// NOTE(review): these vectors grow without bound — consider a rolling
    /// window or summary statistics if memory becomes a concern.
    pub response_times: HashMap<String, Vec<u64>>,
    /// Cache hit/miss counts
    pub cache_stats: CacheStats,
    /// Error counts by error type.
    pub error_counts: HashMap<String, u64>,
    /// Connection pool stats keyed by provider.
    pub connection_stats: HashMap<String, ConnectionPoolStats>,
    /// System resource usage
    pub system_stats: SystemStats,
}

/// Cache statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct CacheStats {
    /// Number of cache hits
    pub hits: u64,
    /// Number of cache misses
    pub misses: u64,
    /// Cache hit rate in [0.0, 1.0]; 0.0 when no lookups recorded.
    pub hit_rate: f64,
}

/// Connection pool statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ConnectionPoolStats {
    /// Total connections in the pool.
    pub total_connections: usize,
    /// Connections currently in use.
    pub active_connections: usize,
    /// Connections currently idle.
    pub idle_connections: usize,
    /// Average response time in milliseconds.
    pub avg_response_time_ms: u64,
}

/// System statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct SystemStats {
    /// Memory usage in bytes
    pub memory_usage_bytes: u64,
    /// CPU usage percentage
    pub cpu_usage_percent: f64,
    /// Disk usage in bytes
    pub disk_usage_bytes: u64,
    /// Uptime in seconds
    pub uptime_seconds: u64,
}

impl PerformanceManager {
    /// Create a new performance manager from the application configuration.
    ///
    /// `LruCache` requires a non-zero capacity, so a configured embedding
    /// cache size of 0 falls back to 100 entries; the response cache is a
    /// fixed 1000 entries.
    pub fn new(config: AppConfig) -> Self {
        let embedding_cache_size = config.embeddings.max_cache_size;
        let response_cache_size = 1000; // Fixed size for response cache

        Self {
            embedding_cache: Arc::new(Mutex::new(LruCache::new(
                std::num::NonZero::new(embedding_cache_size)
                    .unwrap_or(std::num::NonZero::new(100).unwrap()),
            ))),
            response_cache: Arc::new(Mutex::new(LruCache::new(
                std::num::NonZero::new(response_cache_size)
                    .unwrap_or(std::num::NonZero::new(1000).unwrap()),
            ))),
            connection_pools: Arc::new(RwLock::new(HashMap::new())),
            metrics: Arc::new(RwLock::new(PerformanceMetrics::default())),
            config,
        }
    }

    /// Get the connection pool for `provider`, creating it on first use.
    ///
    /// The returned pool is a clone of the registered one; its connections
    /// share state via `Arc`, but the round-robin cursor is snapshotted.
    pub async fn get_connection_pool(&self, provider: &str) -> Result<ConnectionPool> {
        let mut pools = self.connection_pools.write().await;

        if let Some(pool) = pools.get(provider) {
            return Ok(pool.clone());
        }

        // Create new pool
        let timeout = Duration::from_secs(self.config.providers.timeout_seconds);
        let pool = ConnectionPool::new(self.config.performance.max_pool_size, timeout);

        pools.insert(provider.to_string(), pool.clone());
        Ok(pool)
    }

    /// Cache an embedding vector under `key`.
    ///
    /// No-op when embedding caching is disabled in the config. Storing an
    /// entry does not touch hit/miss statistics — an insert is neither a
    /// hit nor a miss; misses are recorded at lookup time in
    /// [`Self::get_cached_embedding`].
    pub async fn cache_embedding(&self, key: String, vector: Vec<f32>) -> AppResult<()> {
        if !self.config.embeddings.cache_embeddings {
            return Ok(());
        }

        let mut cache = self.embedding_cache.lock().await;
        cache.put(key, vector);

        Ok(())
    }

    /// Get a cached embedding vector, recording a cache hit or miss.
    ///
    /// Returns `None` (without touching metrics) when embedding caching is
    /// disabled.
    pub async fn get_cached_embedding(&self, key: &str) -> Option<Vec<f32>> {
        if !self.config.embeddings.cache_embeddings {
            return None;
        }

        let mut cache = self.embedding_cache.lock().await;
        if let Some(vector) = cache.get(key) {
            let mut metrics = self.metrics.write().await;
            metrics.cache_stats.hits += 1;
            metrics.update_cache_hit_rate();

            Some(vector.clone())
        } else {
            // Misses used to be counted on insert (in cache_embedding),
            // which skewed the hit rate; count them where they actually
            // happen — on a failed lookup.
            let mut metrics = self.metrics.write().await;
            metrics.cache_stats.misses += 1;
            metrics.update_cache_hit_rate();

            None
        }
    }

    /// Cache an API response under `key` with the given TTL.
    ///
    /// No-op when the response cache is disabled in the config.
    pub async fn cache_response(
        &self,
        key: String,
        response: String,
        ttl_seconds: u64,
    ) -> AppResult<()> {
        if !self.config.performance.enable_response_cache {
            return Ok(());
        }

        let cached = CachedResponse {
            content: response,
            timestamp: chrono::Utc::now(),
            ttl_seconds,
        };

        let mut cache = self.response_cache.lock().await;
        cache.put(key, cached);

        Ok(())
    }

    /// Get a cached API response if present and not expired.
    ///
    /// Expired entries are evicted on access and counted as misses, as are
    /// absent keys. Returns `None` (without touching metrics) when the
    /// response cache is disabled.
    pub async fn get_cached_response(&self, key: &str) -> Option<String> {
        if !self.config.performance.enable_response_cache {
            return None;
        }

        let mut cache = self.response_cache.lock().await;
        if let Some(cached) = cache.get(key) {
            if cached.is_valid() {
                let mut metrics = self.metrics.write().await;
                metrics.cache_stats.hits += 1;
                metrics.update_cache_hit_rate();

                Some(cached.content.clone())
            } else {
                // Evict the stale entry and record the lookup as a miss
                // (previously expiration was not reflected in the stats).
                cache.pop(key);

                let mut metrics = self.metrics.write().await;
                metrics.cache_stats.misses += 1;
                metrics.update_cache_hit_rate();

                None
            }
        } else {
            let mut metrics = self.metrics.write().await;
            metrics.cache_stats.misses += 1;
            metrics.update_cache_hit_rate();
            None
        }
    }

    /// Record one request of `request_type` taking `response_time_ms`.
    pub async fn record_request(&self, request_type: String, response_time_ms: u64) {
        let mut metrics = self.metrics.write().await;

        // Update request count
        *metrics
            .request_counts
            .entry(request_type.clone())
            .or_insert(0) += 1;

        // Append to the per-type response-time series.
        metrics
            .response_times
            .entry(request_type)
            .or_default()
            .push(response_time_ms);
    }

    /// Record one error of `error_type`.
    pub async fn record_error(&self, error_type: String) {
        let mut metrics = self.metrics.write().await;
        *metrics.error_counts.entry(error_type).or_insert(0) += 1;
    }

    /// Get a snapshot (clone) of the current metrics.
    pub async fn get_metrics(&self) -> PerformanceMetrics {
        self.metrics.read().await.clone()
    }

    /// Build an aggregate performance summary from current metrics.
    ///
    /// Rates and averages are 0.0 — not NaN — when no requests or samples
    /// have been recorded yet.
    pub async fn get_performance_summary(&self) -> PerformanceSummary {
        let metrics = self.metrics.read().await;

        let total_requests: u64 = metrics.request_counts.values().sum();
        let total_errors: u64 = metrics.error_counts.values().sum();
        let error_rate = if total_requests > 0 {
            (total_errors as f64 / total_requests as f64) * 100.0
        } else {
            0.0
        };

        // Guard the division: with zero samples the unguarded computation
        // produced 0.0 / 0.0 == NaN.
        let sample_count: u64 = metrics
            .response_times
            .values()
            .map(|v| v.len() as u64)
            .sum();
        let avg_response_time = if sample_count > 0 {
            metrics
                .response_times
                .values()
                .flat_map(|times| times.iter())
                .sum::<u64>() as f64
                / sample_count as f64
        } else {
            0.0
        };

        PerformanceSummary {
            total_requests,
            total_errors,
            error_rate,
            avg_response_time_ms: avg_response_time,
            cache_hit_rate: metrics.cache_stats.hit_rate,
            uptime_seconds: metrics.system_stats.uptime_seconds,
        }
    }

    /// Evict expired response-cache entries and reset unhealthy connections.
    pub async fn cleanup(&self) -> AppResult<()> {
        // LruCache has no retain(), so collect expired keys first, then pop
        // them in a second pass.
        let mut response_cache = self.response_cache.lock().await;
        let expired: Vec<String> = response_cache
            .iter()
            .filter(|(_, cached)| !cached.is_valid())
            .map(|(key, _)| key.clone())
            .collect();

        for key in expired {
            response_cache.pop(&key);
        }

        // Clean up connection pools
        let mut pools = self.connection_pools.write().await;
        for pool in pools.values_mut() {
            pool.cleanup_unhealthy().await;
        }

        Ok(())
    }

    /// Generate the cache key for an embedding lookup.
    ///
    /// NOTE(review): the raw text is embedded in the key, so keys can be
    /// arbitrarily long — hashing the text would bound key size.
    pub fn generate_embedding_key(&self, text: &str, provider: &str, model: &str) -> String {
        format!("embedding:{}:{}:{}", provider, model, text)
    }

    /// Generate the cache key for an API-response lookup.
    pub fn generate_response_key(&self, prompt: &str, agent_id: i64, history_hash: u64) -> String {
        format!("response:{}:{}:{}", agent_id, history_hash, prompt)
    }
}

/// Aggregate performance summary built by
/// `PerformanceManager::get_performance_summary`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSummary {
    /// Total number of requests
    pub total_requests: u64,
    /// Total number of errors
    pub total_errors: u64,
    /// Error rate as a percentage (0.0–100.0).
    pub error_rate: f64,
    /// Average response time in milliseconds
    pub avg_response_time_ms: f64,
    /// Cache hit rate in [0.0, 1.0].
    pub cache_hit_rate: f64,
    /// System uptime in seconds
    pub uptime_seconds: u64,
}

impl PerformanceMetrics {
    /// Recompute the cache hit rate from the current hit/miss counters.
    ///
    /// The rate is hits / (hits + misses), or 0.0 when no lookups have
    /// been recorded yet.
    fn update_cache_hit_rate(&mut self) {
        let hits = self.cache_stats.hits;
        let lookups = hits + self.cache_stats.misses;
        self.cache_stats.hit_rate = match lookups {
            0 => 0.0,
            n => hits as f64 / n as f64,
        };
    }
}

/// Request timing helper: measures wall time from `start` to `stop` and
/// records it into shared [`PerformanceMetrics`].
pub struct RequestTimer {
    /// Instant captured when the timer was started.
    start_time: Instant,
    /// Label under which the measurement is recorded.
    request_type: String,
    /// Shared metrics sink.
    metrics: Arc<RwLock<PerformanceMetrics>>,
}

impl RequestTimer {
    /// Start timing a request of the given type.
    pub fn start(request_type: String, metrics: Arc<RwLock<PerformanceMetrics>>) -> Self {
        Self {
            start_time: Instant::now(),
            request_type,
            metrics,
        }
    }

    /// Stop timing and record the elapsed duration into the shared metrics.
    ///
    /// Increments the per-type request count and appends the elapsed
    /// milliseconds to the per-type response-time series.
    pub async fn stop(self) {
        let duration = self.start_time.elapsed();
        // Saturate instead of truncating: a bare u128 -> u64 `as` cast
        // would silently wrap for extremely long durations.
        let duration_ms = u64::try_from(duration.as_millis()).unwrap_or(u64::MAX);

        let mut metrics = self.metrics.write().await;
        *metrics
            .request_counts
            .entry(self.request_type.clone())
            .or_insert(0) += 1;
        metrics
            .response_times
            .entry(self.request_type)
            .or_default()
            .push(duration_ms);
    }
}

/// Macro for timing requests.
///
/// Expands to: start a `RequestTimer`, evaluate `$block`, then await
/// `timer.stop()` to record the elapsed time, yielding the block's value.
/// Must be used in an async context (the expansion contains `.await`);
/// `$metrics` must be an `Arc<RwLock<PerformanceMetrics>>` (it is cloned).
#[macro_export]
macro_rules! time_request {
    ($metrics:expr, $request_type:expr, $block:block) => {{
        let timer =
            $crate::performance::RequestTimer::start($request_type.to_string(), $metrics.clone());
        // NOTE: if $block returns early (e.g. via `?`), the timer is
        // dropped without recording — only completed runs are measured.
        let result = $block;
        timer.stop().await;
        result
    }};
}

#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): these tests assume AppConfig::default() enables both
    // embedding caching and the response cache — verify against the
    // config module.
    #[tokio::test]
    async fn test_embedding_cache() {
        let config = AppConfig::default();
        let perf_manager = PerformanceManager::new(config);

        let key = "test_key".to_string();
        let vector = vec![0.1, 0.2, 0.3];

        // Cache embedding
        perf_manager
            .cache_embedding(key.clone(), vector.clone())
            .await
            .unwrap();

        // Retrieve cached embedding
        let cached = perf_manager.get_cached_embedding(&key).await;
        assert_eq!(cached, Some(vector));

        // Test non-existent key
        let not_found = perf_manager.get_cached_embedding("non_existent").await;
        assert_eq!(not_found, None);
    }

    #[tokio::test]
    async fn test_response_cache() {
        let config = AppConfig::default();
        let perf_manager = PerformanceManager::new(config);

        let key = "test_response".to_string();
        let response = "test content".to_string();

        // Cache response with a 5-minute TTL
        perf_manager
            .cache_response(key.clone(), response.clone(), 300)
            .await
            .unwrap();

        // Retrieve cached response
        let cached = perf_manager.get_cached_response(&key).await;
        assert_eq!(cached, Some(response));

        // A TTL of 0 means the entry is expired as soon as a full second
        // (or any positive age) has elapsed.
        let expired_key = "expired".to_string();
        perf_manager
            .cache_response(expired_key.clone(), "expired".to_string(), 0)
            .await
            .unwrap();

        // Wait a bit to ensure expiration
        tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;

        let expired = perf_manager.get_cached_response(&expired_key).await;
        assert_eq!(expired, None);
    }

    #[tokio::test]
    async fn test_connection_pool() {
        let pool = ConnectionPool::new(3, Duration::from_secs(30));

        assert_eq!(pool.connections.len(), 3);

        let conn1 = pool.get_connection();
        let conn2 = pool.get_connection();

        // Should be different connections (round-robin advances the cursor)
        assert!(!std::ptr::eq(conn1, conn2));
    }

    #[tokio::test]
    async fn test_metrics_recording() {
        let config = AppConfig::default();
        let perf_manager = PerformanceManager::new(config);

        perf_manager
            .record_request("test_request".to_string(), 100)
            .await;
        perf_manager.record_error("test_error".to_string()).await;

        let metrics = perf_manager.get_metrics().await;
        assert_eq!(metrics.request_counts.get("test_request"), Some(&1));
        assert_eq!(metrics.error_counts.get("test_error"), Some(&1));
    }
}
