// Performance monitoring and metrics collection
use crate::Result;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::RwLock;
use tokio::time::interval;

use sysinfo::System;

/// Performance monitoring system.
///
/// Aggregates operation, memory, system, I/O, and custom metrics into
/// bounded time series behind an async `RwLock`, and can optionally run a
/// background tokio task that samples system state periodically.
pub struct PerformanceMonitor {
    /// Shared metrics storage; a clone of this `Arc` is moved into the
    /// background collection task.
    metrics: Arc<RwLock<MetricsStorage>>,
    /// Monitor configuration (sample limits, intervals, feature toggles).
    config: MonitorConfig,
    /// Start time used for uptime calculation in reports.
    start_time: Instant,
    /// System information collector (sysinfo); refreshed on each sample.
    system: Arc<RwLock<System>>,
    /// Handle of the background monitoring task; `None` until
    /// `start_monitoring` is called, reset by `stop_monitoring`.
    monitoring_handle: Option<tokio::task::JoinHandle<()>>,
}

/// Configuration for performance monitoring.
#[derive(Debug, Clone)]
pub struct MonitorConfig {
    /// Maximum number of samples to keep for each metric time series.
    pub max_samples: usize,
    /// Sampling interval in milliseconds.
    // NOTE(review): not referenced anywhere in this module; presumably
    // consumed by callers — confirm before removing.
    pub sampling_interval_ms: u64,
    /// Tick period of the background collection task, in milliseconds.
    pub collection_interval_ms: u64,
    /// Whether to enable detailed profiling.
    // NOTE(review): currently unused in this module — verify against callers.
    pub enable_profiling: bool,
    /// Whether the background task collects memory metrics.
    pub enable_memory_tracking: bool,
    /// Whether to enable I/O tracking.
    // NOTE(review): currently unused in this module — verify against callers.
    pub enable_io_tracking: bool,
    /// Whether the background task collects system (CPU/memory) metrics.
    pub enable_system_monitoring: bool,
    /// Whether to enable real-time monitoring.
    // NOTE(review): currently unused in this module — verify against callers.
    pub enable_realtime_monitoring: bool,
}

impl Default for MonitorConfig {
    fn default() -> Self {
        Self {
            max_samples: 1000,
            sampling_interval_ms: 100,
            collection_interval_ms: 1000,
            enable_profiling: true,
            enable_memory_tracking: true,
            enable_io_tracking: true,
            enable_system_monitoring: true,
            enable_realtime_monitoring: false,
        }
    }
}

/// Metrics storage for different types of performance data.
///
/// `Default` sizes each built-in series via `TimeSeries::default()`
/// (1000 samples); custom series are created on first insert.
#[derive(Debug, Default)]
pub struct MetricsStorage {
    /// Per-operation timing/success metrics.
    pub operation_metrics: TimeSeries<OperationMetric>,
    /// Memory usage metrics.
    pub memory_metrics: TimeSeries<MemoryMetric>,
    /// System-wide metrics (CPU, memory, disk, network).
    pub system_metrics: TimeSeries<SystemMetric>,
    /// I/O metrics.
    pub io_metrics: TimeSeries<IoMetric>,
    /// Custom scalar metrics, keyed by caller-supplied name.
    pub custom_metrics: HashMap<String, TimeSeries<f64>>,
}

/// Bounded time series for storing metric samples.
///
/// Acts as a ring buffer: once `max_samples` is reached, the oldest
/// sample is evicted on each insert.
#[derive(Debug)]
pub struct TimeSeries<T> {
    /// Maximum number of samples to keep.
    max_samples: usize,
    /// Stored samples with wall-clock timestamps, oldest first.
    samples: VecDeque<(SystemTime, T)>,
}

impl<T> Default for TimeSeries<T> {
    fn default() -> Self {
        Self::new(1000)
    }
}

impl<T> TimeSeries<T> {
    pub fn new(max_samples: usize) -> Self {
        Self {
            max_samples,
            samples: VecDeque::with_capacity(max_samples),
        }
    }

    pub fn add_sample(&mut self, sample: T) {
        let timestamp = SystemTime::now();
        self.samples.push_back((timestamp, sample));
        
        if self.samples.len() > self.max_samples {
            self.samples.pop_front();
        }
    }

    pub fn get_samples(&self) -> &VecDeque<(SystemTime, T)> {
        &self.samples
    }

    pub fn latest(&self) -> Option<&T> {
        self.samples.back().map(|(_, sample)| sample)
    }

    pub fn len(&self) -> usize {
        self.samples.len()
    }

    pub fn is_empty(&self) -> bool {
        self.samples.is_empty()
    }
}

/// One recorded operation: how long it took and whether it succeeded.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OperationMetric {
    /// Caller-supplied operation identifier.
    pub operation_name: String,
    /// Wall-clock duration of the operation in milliseconds.
    pub duration_ms: f64,
    /// Whether the operation completed successfully.
    pub success: bool,
    /// Error description when `success` is `false`.
    pub error_message: Option<String>,
    /// Arbitrary key/value context attached by the caller.
    pub metadata: HashMap<String, String>,
}

/// Memory usage snapshot.
///
/// NOTE(review): the background collector currently fills `heap_*` and
/// `rss_bytes` with *system-wide* memory figures from sysinfo, not
/// per-process values — see `collect_memory_metrics_background`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryMetric {
    /// Heap bytes in use.
    pub heap_used_bytes: u64,
    /// Total heap bytes available.
    pub heap_total_bytes: u64,
    /// Stack bytes in use (currently always 0 — not easily measurable).
    pub stack_used_bytes: u64,
    /// Resident set size in bytes.
    pub rss_bytes: u64,
    /// Virtual memory size in bytes.
    pub virtual_memory_bytes: u64,
}

/// System-wide resource usage snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemMetric {
    /// Average CPU usage across all cores, 0–100.
    pub cpu_usage_percent: f64,
    /// Used/total system memory, 0–100.
    pub memory_usage_percent: f64,
    /// Disk usage percentage (currently always 0 — collection not implemented).
    pub disk_usage_percent: f64,
    /// Inbound network bytes (currently always 0 — collection not implemented).
    pub network_bytes_in: u64,
    /// Outbound network bytes (currently always 0 — collection not implemented).
    pub network_bytes_out: u64,
}

/// I/O activity sample.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IoMetric {
    /// Bytes read during the sample window.
    pub read_bytes: u64,
    /// Bytes written during the sample window.
    pub write_bytes: u64,
    /// Number of read operations during the sample window.
    pub read_operations: u64,
    /// Number of write operations during the sample window.
    pub write_operations: u64,
}

/// Point-in-time performance report with aggregated metrics,
/// produced by `PerformanceMonitor::get_report`.
#[derive(Debug, Serialize, Deserialize)]
pub struct PerformanceReport {
    /// Wall-clock time the report was generated.
    pub timestamp: SystemTime,
    /// Seconds elapsed since the monitor was created.
    pub uptime_seconds: f64,
    /// Aggregated operation timing/success statistics.
    pub operation_stats: OperationStats,
    /// Aggregated memory statistics.
    pub memory_stats: MemoryStats,
    /// Aggregated system statistics.
    pub system_stats: SystemStats,
    /// Aggregated I/O statistics.
    pub io_stats: IoStats,
}

/// Aggregated operation statistics over the retained sample window.
#[derive(Debug, Serialize, Deserialize)]
pub struct OperationStats {
    /// Total operations in the window.
    pub total_operations: u64,
    /// Operations with `success == true`.
    pub successful_operations: u64,
    /// Operations with `success == false`.
    pub failed_operations: u64,
    /// Mean duration in milliseconds; 0 when there are no samples.
    pub average_duration_ms: f64,
    /// Shortest recorded duration in milliseconds.
    pub min_duration_ms: f64,
    /// Longest recorded duration in milliseconds.
    pub max_duration_ms: f64,
    /// Throughput over the window's timestamp span; 0 with < 2 samples.
    pub operations_per_second: f64,
}

/// Aggregated memory statistics over the retained sample window.
#[derive(Debug, Serialize, Deserialize)]
pub struct MemoryStats {
    /// Heap bytes in use as of the latest sample.
    pub current_heap_used_bytes: u64,
    /// Total heap bytes as of the latest sample.
    pub current_heap_total_bytes: u64,
    /// Highest heap usage observed in the window.
    pub peak_heap_used_bytes: u64,
    /// Mean of per-sample used/total heap ratios, 0–100.
    pub average_heap_usage_percent: f64,
    /// Resident set size as of the latest sample.
    pub current_rss_bytes: u64,
    /// Highest resident set size observed in the window.
    pub peak_rss_bytes: u64,
}

/// Aggregated system statistics over the retained sample window.
#[derive(Debug, Serialize, Deserialize)]
pub struct SystemStats {
    /// CPU usage as of the latest sample, 0–100.
    pub current_cpu_usage_percent: f64,
    /// Mean CPU usage across the window, 0–100.
    pub average_cpu_usage_percent: f64,
    /// Highest CPU usage observed in the window, 0–100.
    pub peak_cpu_usage_percent: f64,
    /// Memory usage as of the latest sample, 0–100.
    pub current_memory_usage_percent: f64,
    /// Mean memory usage across the window, 0–100.
    pub average_memory_usage_percent: f64,
    /// Disk usage as of the latest sample (currently always 0).
    pub current_disk_usage_percent: f64,
    /// Sum of per-sample inbound network bytes (currently always 0).
    pub total_network_bytes_in: u64,
    /// Sum of per-sample outbound network bytes (currently always 0).
    pub total_network_bytes_out: u64,
}

/// Aggregated I/O statistics over the retained sample window.
#[derive(Debug, Serialize, Deserialize)]
pub struct IoStats {
    /// Sum of bytes read across all samples.
    pub total_read_bytes: u64,
    /// Sum of bytes written across all samples.
    pub total_write_bytes: u64,
    /// Sum of read operations across all samples.
    pub total_read_operations: u64,
    /// Sum of write operations across all samples.
    pub total_write_operations: u64,
    /// Read bytes divided by the window's timestamp span; 0 with < 2 samples.
    pub average_read_throughput_bps: f64,
    /// Write bytes divided by the window's timestamp span; 0 with < 2 samples.
    pub average_write_throughput_bps: f64,
}

impl PerformanceMonitor {
    /// Create a new performance monitor with default configuration
    pub fn new() -> Self {
        let system = System::new_all();

        Self {
            metrics: Arc::new(RwLock::new(MetricsStorage::default())),
            config: MonitorConfig::default(),
            start_time: Instant::now(),
            system: Arc::new(RwLock::new(system)),
            monitoring_handle: None,
        }
    }

    /// Create a new performance monitor with custom configuration
    pub fn with_config(config: MonitorConfig) -> Self {
        let system = System::new_all();

        Self {
            metrics: Arc::new(RwLock::new(MetricsStorage::default())),
            config,
            start_time: Instant::now(),
            system: Arc::new(RwLock::new(system)),
            monitoring_handle: None,
        }
    }

    /// Start background monitoring
    pub async fn start_monitoring(&mut self) -> Result<()> {
        if self.monitoring_handle.is_some() {
            return Ok(()); // Already monitoring
        }

        let metrics = Arc::clone(&self.metrics);
        let system = Arc::clone(&self.system);
        let config = self.config.clone();

        let handle = tokio::spawn(async move {
            let mut interval = interval(Duration::from_millis(config.collection_interval_ms));

            loop {
                interval.tick().await;

                if config.enable_system_monitoring {
                    if let Err(e) = Self::collect_system_metrics_background(&metrics, &system).await {
                        tracing::error!("Failed to collect system metrics: {}", e);
                    }
                }

                if config.enable_memory_tracking {
                    if let Err(e) = Self::collect_memory_metrics_background(&metrics, &system).await {
                        tracing::error!("Failed to collect memory metrics: {}", e);
                    }
                }
            }
        });

        self.monitoring_handle = Some(handle);
        Ok(())
    }

    /// Stop background monitoring
    pub async fn stop_monitoring(&mut self) {
        if let Some(handle) = self.monitoring_handle.take() {
            handle.abort();
        }
    }

    /// Background system metrics collection
    async fn collect_system_metrics_background(
        metrics: &Arc<RwLock<MetricsStorage>>,
        system: &Arc<RwLock<System>>,
    ) -> Result<()> {
        let mut sys = system.write().await;
        sys.refresh_all();

        // Get CPU usage - calculate average from all CPUs
        let cpu_usage_percent = if !sys.cpus().is_empty() {
            sys.cpus().iter()
                .map(|cpu| cpu.cpu_usage())
                .sum::<f32>() as f64 / sys.cpus().len() as f64
        } else {
            0.0
        };

        // Get memory usage
        let total_memory = sys.total_memory();
        let used_memory = sys.used_memory();
        let memory_usage_percent = if total_memory > 0 {
            (used_memory as f64 / total_memory as f64) * 100.0
        } else {
            0.0
        };

        // Get disk usage (simplified - just return 0 for now)
        let disk_usage_percent = 0.0;

        // Get network usage (simplified - just return 0 for now)
        let network_bytes_in = 0u64;
        let network_bytes_out = 0u64;

        let metric = SystemMetric {
            cpu_usage_percent,
            memory_usage_percent,
            disk_usage_percent,
            network_bytes_in,
            network_bytes_out,
        };

        let mut metrics_storage = metrics.write().await;
        metrics_storage.system_metrics.add_sample(metric);

        Ok(())
    }

    /// Background memory metrics collection
    async fn collect_memory_metrics_background(
        metrics: &Arc<RwLock<MetricsStorage>>,
        system: &Arc<RwLock<System>>,
    ) -> Result<()> {
        let sys = system.read().await;
        
        // For now, just use system memory info
        let heap_used_bytes = sys.used_memory();
        let heap_total_bytes = sys.total_memory();
        let stack_used_bytes = 0u64; // Not easily available
        let rss_bytes = sys.used_memory();
        let virtual_memory_bytes = sys.total_memory();

        let metric = MemoryMetric {
            heap_used_bytes,
            heap_total_bytes,
            stack_used_bytes,
            rss_bytes,
            virtual_memory_bytes,
        };

        let mut metrics_storage = metrics.write().await;
        metrics_storage.memory_metrics.add_sample(metric);

        Ok(())
    }

    /// Record an operation metric
    pub async fn record_operation(&self, metric: OperationMetric) {
        let mut metrics = self.metrics.write().await;
        metrics.operation_metrics.add_sample(metric);
    }

    /// Record a custom metric
    pub async fn record_custom_metric(&self, name: String, value: f64) {
        let mut metrics = self.metrics.write().await;
        metrics.custom_metrics
            .entry(name)
            .or_insert_with(|| TimeSeries::new(self.config.max_samples))
            .add_sample(value);
    }

    /// Get current performance report
    pub async fn get_report(&self) -> PerformanceReport {
        let metrics = self.metrics.read().await;
        let uptime_seconds = self.start_time.elapsed().as_secs_f64();

        let operation_stats = self.calculate_operation_stats(&metrics.operation_metrics);
        let memory_stats = self.calculate_memory_stats(&metrics.memory_metrics);
        let system_stats = self.calculate_system_stats(&metrics.system_metrics);
        let io_stats = self.calculate_io_stats(&metrics.io_metrics);

        PerformanceReport {
            timestamp: SystemTime::now(),
            uptime_seconds,
            operation_stats,
            memory_stats,
            system_stats,
            io_stats,
        }
    }

    /// Calculate operation statistics
    fn calculate_operation_stats(&self, metrics: &TimeSeries<OperationMetric>) -> OperationStats {
        let samples = metrics.get_samples();

        if samples.is_empty() {
            return OperationStats {
                total_operations: 0,
                successful_operations: 0,
                failed_operations: 0,
                average_duration_ms: 0.0,
                min_duration_ms: 0.0,
                max_duration_ms: 0.0,
                operations_per_second: 0.0,
            };
        }

        let total_operations = samples.len() as u64;
        let successful_operations = samples.iter()
            .filter(|(_, metric)| metric.success)
            .count() as u64;
        let failed_operations = total_operations - successful_operations;

        let durations: Vec<f64> = samples.iter()
            .map(|(_, metric)| metric.duration_ms)
            .collect();

        let average_duration_ms = durations.iter().sum::<f64>() / durations.len() as f64;
        let min_duration_ms = durations.iter().fold(f64::INFINITY, |a, &b| a.min(b));
        let max_duration_ms = durations.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b));

        // Calculate operations per second based on recent samples
        let operations_per_second = if samples.len() >= 2 {
            let time_span = samples.back().unwrap().0
                .duration_since(samples.front().unwrap().0)
                .unwrap_or(Duration::from_secs(1))
                .as_secs_f64();
            if time_span > 0.0 {
                samples.len() as f64 / time_span
            } else {
                0.0
            }
        } else {
            0.0
        };

        OperationStats {
            total_operations,
            successful_operations,
            failed_operations,
            average_duration_ms,
            min_duration_ms,
            max_duration_ms,
            operations_per_second,
        }
    }

    /// Calculate memory statistics
    fn calculate_memory_stats(&self, metrics: &TimeSeries<MemoryMetric>) -> MemoryStats {
        let samples = metrics.get_samples();

        if samples.is_empty() {
            return MemoryStats {
                current_heap_used_bytes: 0,
                current_heap_total_bytes: 0,
                peak_heap_used_bytes: 0,
                average_heap_usage_percent: 0.0,
                current_rss_bytes: 0,
                peak_rss_bytes: 0,
            };
        }

        let latest = &samples.back().unwrap().1;
        let current_heap_used_bytes = latest.heap_used_bytes;
        let current_heap_total_bytes = latest.heap_total_bytes;
        let current_rss_bytes = latest.rss_bytes;

        let peak_heap_used_bytes = samples.iter()
            .map(|(_, metric)| metric.heap_used_bytes)
            .max()
            .unwrap_or(0);

        let peak_rss_bytes = samples.iter()
            .map(|(_, metric)| metric.rss_bytes)
            .max()
            .unwrap_or(0);

        let average_heap_usage_percent = if current_heap_total_bytes > 0 {
            let total_usage: f64 = samples.iter()
                .map(|(_, metric)| {
                    if metric.heap_total_bytes > 0 {
                        (metric.heap_used_bytes as f64 / metric.heap_total_bytes as f64) * 100.0
                    } else {
                        0.0
                    }
                })
                .sum();
            total_usage / samples.len() as f64
        } else {
            0.0
        };

        MemoryStats {
            current_heap_used_bytes,
            current_heap_total_bytes,
            peak_heap_used_bytes,
            average_heap_usage_percent,
            current_rss_bytes,
            peak_rss_bytes,
        }
    }

    /// Calculate system statistics
    fn calculate_system_stats(&self, metrics: &TimeSeries<SystemMetric>) -> SystemStats {
        let samples = metrics.get_samples();

        if samples.is_empty() {
            return SystemStats {
                current_cpu_usage_percent: 0.0,
                average_cpu_usage_percent: 0.0,
                peak_cpu_usage_percent: 0.0,
                current_memory_usage_percent: 0.0,
                average_memory_usage_percent: 0.0,
                current_disk_usage_percent: 0.0,
                total_network_bytes_in: 0,
                total_network_bytes_out: 0,
            };
        }

        let latest = &samples.back().unwrap().1;
        let current_cpu_usage_percent = latest.cpu_usage_percent;
        let current_memory_usage_percent = latest.memory_usage_percent;
        let current_disk_usage_percent = latest.disk_usage_percent;

        let average_cpu_usage_percent = samples.iter()
            .map(|(_, metric)| metric.cpu_usage_percent)
            .sum::<f64>() / samples.len() as f64;

        let peak_cpu_usage_percent = samples.iter()
            .map(|(_, metric)| metric.cpu_usage_percent)
            .fold(0.0f64, |a, b| a.max(b));

        let average_memory_usage_percent = samples.iter()
            .map(|(_, metric)| metric.memory_usage_percent)
            .sum::<f64>() / samples.len() as f64;

        let total_network_bytes_in = samples.iter()
            .map(|(_, metric)| metric.network_bytes_in)
            .sum();

        let total_network_bytes_out = samples.iter()
            .map(|(_, metric)| metric.network_bytes_out)
            .sum();

        SystemStats {
            current_cpu_usage_percent,
            average_cpu_usage_percent,
            peak_cpu_usage_percent,
            current_memory_usage_percent,
            average_memory_usage_percent,
            current_disk_usage_percent,
            total_network_bytes_in,
            total_network_bytes_out,
        }
    }

    /// Calculate I/O statistics
    fn calculate_io_stats(&self, metrics: &TimeSeries<IoMetric>) -> IoStats {
        let samples = metrics.get_samples();

        if samples.is_empty() {
            return IoStats {
                total_read_bytes: 0,
                total_write_bytes: 0,
                total_read_operations: 0,
                total_write_operations: 0,
                average_read_throughput_bps: 0.0,
                average_write_throughput_bps: 0.0,
            };
        }

        let total_read_bytes = samples.iter()
            .map(|(_, metric)| metric.read_bytes)
            .sum();

        let total_write_bytes = samples.iter()
            .map(|(_, metric)| metric.write_bytes)
            .sum();

        let total_read_operations = samples.iter()
            .map(|(_, metric)| metric.read_operations)
            .sum();

        let total_write_operations = samples.iter()
            .map(|(_, metric)| metric.write_operations)
            .sum();

        // Calculate throughput based on time span
        let (average_read_throughput_bps, average_write_throughput_bps) = if samples.len() >= 2 {
            let time_span = samples.back().unwrap().0
                .duration_since(samples.front().unwrap().0)
                .unwrap_or(Duration::from_secs(1))
                .as_secs_f64();

            if time_span > 0.0 {
                (
                    total_read_bytes as f64 / time_span,
                    total_write_bytes as f64 / time_span,
                )
            } else {
                (0.0, 0.0)
            }
        } else {
            (0.0, 0.0)
        };

        IoStats {
            total_read_bytes,
            total_write_bytes,
            total_read_operations,
            total_write_operations,
            average_read_throughput_bps,
            average_write_throughput_bps,
        }
    }

    /// Get metrics storage (for advanced usage)
    pub async fn get_metrics(&self) -> tokio::sync::RwLockReadGuard<MetricsStorage> {
        self.metrics.read().await
    }

    /// Clear all metrics
    pub async fn clear_metrics(&self) {
        let mut metrics = self.metrics.write().await;
        *metrics = MetricsStorage::default();
    }
}
