use crate::config::LoggingConfig;
use crate::errors::{AppError, AppResult};
use crate::performance::PerformanceManager;
use anyhow::Result;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::io::Write;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn, Level, Span};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

/// Structured log entry
///
/// In-memory representation of a single log record. Serialized to JSON when
/// written through `FileLogWriter`, and retained (capped at 1000) by
/// `MonitoringManager` so recent entries can be served to the dashboard.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogEntry {
    /// Timestamp (UTC) at which the entry was created
    pub timestamp: DateTime<Utc>,
    /// Log level
    pub level: LogLevel,
    /// Message
    pub message: String,
    /// Arbitrary key/value fields for structured logging
    pub fields: HashMap<String, serde_json::Value>,
    /// Name of the tracing span active when the entry was logged, if any
    pub span: Option<String>,
    /// Request ID if applicable
    pub request_id: Option<String>,
    /// User ID if applicable
    pub user_id: Option<String>,
}

/// Log level
///
/// Ordered from least severe (`Debug`) to most severe (`Error`); the derived
/// `Ord` follows declaration order. Converts to/from `tracing::Level` (note
/// that `tracing` also has a `TRACE` level with no counterpart here).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum LogLevel {
    Debug,
    Info,
    Warn,
    Error,
}

impl From<LogLevel> for tracing::Level {
    /// Map an application `LogLevel` onto the equivalent `tracing` level.
    fn from(value: LogLevel) -> Self {
        match value {
            LogLevel::Error => Level::ERROR,
            LogLevel::Warn => Level::WARN,
            LogLevel::Info => Level::INFO,
            LogLevel::Debug => Level::DEBUG,
        }
    }
}

impl From<tracing::Level> for LogLevel {
    /// Map a `tracing` level onto the nearest application `LogLevel`.
    ///
    /// `TRACE` has no counterpart in `LogLevel`, so it maps to `Debug` (the
    /// closest verbosity) instead of silently collapsing to `Info` through
    /// the catch-all arm as it previously did. The catch-all remains as a
    /// defensive default because `Level` constants cannot be matched
    /// exhaustively.
    fn from(level: tracing::Level) -> Self {
        match level {
            Level::TRACE => LogLevel::Debug,
            Level::DEBUG => LogLevel::Debug,
            Level::INFO => LogLevel::Info,
            Level::WARN => LogLevel::Warn,
            Level::ERROR => LogLevel::Error,
            _ => LogLevel::Info,
        }
    }
}

/// Health check status
///
/// Aggregate snapshot of system health: one record per checked component plus
/// an overall status derived from them (worst component wins; see
/// `MonitoringManager::calculate_overall_health`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthStatus {
    /// Overall health status, derived from the component statuses
    pub status: HealthState,
    /// Per-component statuses, keyed by component name
    pub components: HashMap<String, ComponentHealth>,
    /// Timestamp of the last update
    pub timestamp: DateTime<Utc>,
    /// Application version (taken from `CARGO_PKG_VERSION` at build time)
    pub version: String,
    /// Uptime in seconds since the manager was created
    pub uptime_seconds: u64,
}

/// Health state
///
/// Severity increases down the list: `Healthy` < `Degraded` < `Unhealthy`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum HealthState {
    /// Operating normally
    Healthy,
    /// Operational but impaired
    Degraded,
    /// Not operational
    Unhealthy,
}

/// Component health
///
/// Health record for a single named subsystem, refreshed via
/// `MonitoringManager::update_component_health`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComponentHealth {
    /// Component name (also used as the map key in `HealthStatus::components`)
    pub name: String,
    /// Health status
    pub status: HealthState,
    /// Optional human-readable status message
    pub message: Option<String>,
    /// When this component was last checked
    pub last_checked: DateTime<Utc>,
    /// Response time in milliseconds, when measured
    pub response_time_ms: Option<u64>,
}

/// Alert definition
///
/// A single alert instance. Alerts are created `Active`; the monitoring loop
/// prunes them once resolved (kept 1 hour) or suppressed (kept 24 hours),
/// measured from `timestamp`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Alert {
    /// Alert ID (UUID v4, generated at creation)
    pub id: String,
    /// Category of condition that triggered the alert
    pub alert_type: AlertType,
    /// Severity level
    pub severity: AlertSeverity,
    /// Short title
    pub title: String,
    /// Longer description
    pub description: String,
    /// Creation timestamp
    pub timestamp: DateTime<Utc>,
    /// Lifecycle status
    pub status: AlertStatus,
    /// Additional structured data attached at creation
    pub data: HashMap<String, serde_json::Value>,
}

/// Alert type
///
/// Category of condition that triggered an alert; `Custom` carries a
/// caller-defined label for anything not covered by the built-in variants.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AlertType {
    ErrorRateHigh,
    ResponseTimeHigh,
    MemoryUsageHigh,
    CpuUsageHigh,
    ServiceUnavailable,
    ConnectionFailed,
    Custom(String),
}

/// Alert severity
///
/// Severity increases down the list: `Info` < `Warning` < `Critical`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum AlertSeverity {
    Info,
    Warning,
    Critical,
}

/// Alert status
///
/// Lifecycle state of an alert. Retention of non-active alerts is handled by
/// the monitoring loop (resolved kept 1 hour, suppressed kept 24 hours).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AlertStatus {
    /// Currently firing
    Active,
    /// Marked resolved via `MonitoringManager::resolve_alert`
    Resolved,
    /// Intentionally muted
    Suppressed,
}

/// Monitoring and logging manager
///
/// Central owner of logging, alerting, and health-check state. Mutable state
/// lives behind `Arc<RwLock<…>>` so the background task spawned by
/// `start_monitoring` can share and update it.
pub struct MonitoringManager {
    /// Logging configuration
    config: LoggingConfig,
    /// Performance manager supplying metrics and summaries
    performance_manager: Arc<PerformanceManager>,
    /// Alerts: active ones plus recently resolved/suppressed pending cleanup
    alerts: Arc<RwLock<Vec<Alert>>>,
    /// Most recent log entries, capped at 1000 (in-memory logging)
    log_entries: Arc<RwLock<Vec<LogEntry>>>,
    /// Latest health check results
    health_status: Arc<RwLock<HealthStatus>>,
    /// Application start time, used to compute uptime
    start_time: DateTime<Utc>,
    /// Optional file writer; present when file logging is enabled with a path
    file_writer: Option<Arc<FileLogWriter>>,
}

/// File log writer
pub struct FileLogWriter {
    file_path: std::path::PathBuf,
    file: std::fs::File,
}

impl FileLogWriter {
    /// Create a new file log writer
    pub fn new(file_path: std::path::PathBuf) -> Result<Self> {
        let file = std::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(&file_path)?;

        Ok(Self { file_path, file })
    }

    /// Write a log entry to file
    pub fn write(&mut self, entry: &LogEntry) -> Result<()> {
        let json = serde_json::to_string(entry)?;
        writeln!(self.file, "{}", json)?;
        self.file.flush()?;
        Ok(())
    }
}

impl MonitoringManager {
    /// Create a new monitoring manager
    ///
    /// Opens the log file writer up front when file logging is enabled and a
    /// path is configured; otherwise no file writer is attached. The initial
    /// health status is `Healthy` with no components.
    ///
    /// # Errors
    /// Fails only if the log file cannot be opened.
    pub fn new(
        config: LoggingConfig,
        performance_manager: Arc<PerformanceManager>,
    ) -> Result<Self> {
        // A writer is attached only when file logging is enabled AND a path
        // was provided.
        let mut file_writer = None;
        if config.log_to_file {
            if let Some(path) = &config.log_file_path {
                file_writer = Some(Arc::new(FileLogWriter::new(path.clone())?));
            }
        }

        Ok(Self {
            config,
            performance_manager,
            alerts: Arc::new(RwLock::new(Vec::new())),
            log_entries: Arc::new(RwLock::new(Vec::new())),
            health_status: Arc::new(RwLock::new(HealthStatus {
                status: HealthState::Healthy,
                components: HashMap::new(),
                timestamp: Utc::now(),
                version: env!("CARGO_PKG_VERSION").to_string(),
                uptime_seconds: 0,
            })),
            start_time: Utc::now(),
            file_writer,
        })
    }

    /// Initialize logging system
    ///
    /// Installs a global `tracing` subscriber: JSON-formatted output with
    /// span-close events when `config.structured` is set, pretty-printed
    /// output otherwise. The level filter is parsed from `config.level`,
    /// defaulting to INFO for unrecognized values.
    ///
    /// NOTE(review): `init()` panics if a global subscriber was already
    /// installed — call this at most once per process.
    pub fn init_logging(&self) -> Result<()> {
        // Map the configured level string onto a tracing level; anything
        // unknown falls back to INFO.
        let level = match self.config.level.as_str() {
            "debug" => Level::DEBUG,
            "info" => Level::INFO,
            "warn" => Level::WARN,
            "error" => Level::ERROR,
            _ => Level::INFO,
        };

        if self.config.structured {
            // Structured logging: JSON output with RFC 3339 timestamps,
            // current-span context, and an event emitted when spans close.
            tracing_subscriber::registry()
                .with(
                    tracing_subscriber::fmt::layer()
                        .json()
                        .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE)
                        .with_current_span(true)
                        .with_timer(tracing_subscriber::fmt::time::ChronoLocal::rfc_3339()),
                )
                .with(tracing_subscriber::filter::LevelFilter::from_level(level))
                .init();
        } else {
            // Regular logging: human-readable pretty output.
            tracing_subscriber::registry()
                .with(
                    tracing_subscriber::fmt::layer()
                        .pretty()
                        .with_timer(tracing_subscriber::fmt::time::ChronoLocal::rfc_3339()),
                )
                .with(tracing_subscriber::filter::LevelFilter::from_level(level))
                .init();
        }

        info!("Logging system initialized with level: {}", level);
        Ok(())
    }

    /// Log a structured message
    pub async fn log(
        &self,
        level: LogLevel,
        message: String,
        fields: Option<HashMap<String, serde_json::Value>>,
        request_id: Option<String>,
        user_id: Option<String>,
    ) {
        let span = Span::current();
        let span_name = span.metadata().map(|m| m.name()).unwrap_or("unknown");

        let entry = LogEntry {
            timestamp: Utc::now(),
            level,
            message,
            fields: fields.unwrap_or_default(),
            span: Some(span_name.to_string()),
            request_id,
            user_id,
        };

        // Log using tracing
        let tracing_level: tracing::Level = level.into();
        match tracing_level {
            Level::DEBUG => debug!(message = %entry.message, ?entry.fields),
            Level::INFO => info!(message = %entry.message, ?entry.fields),
            Level::WARN => warn!(message = %entry.message, ?entry.fields),
            Level::ERROR => error!(message = %entry.message, ?entry.fields),
            _ => info!(message = %entry.message, ?entry.fields),
        }

        // Store in memory (limited size)
        {
            let mut entries = self.log_entries.write().await;
            entries.push(entry.clone());

            // Keep only last 1000 entries
            if entries.len() > 1000 {
                entries.remove(0);
            }
        }

        // Write to file if enabled
        if let Some(writer) = &self.file_writer {
            // Note: File writing would need to be handled differently since Arc doesn't allow mutable access
            // For now, we'll skip file writing in this async context
            // In a real implementation, you'd use a different approach like a channel
        }
    }

    /// Create an alert
    ///
    /// Registers a new `Active` alert with a generated UUID, logs its
    /// creation at `Warn` level, and returns the alert ID.
    ///
    /// The alerts lock is released before `self.log(...).await` so the lock
    /// is never held across an await point (the previous version held the
    /// write guard for the whole call).
    pub async fn create_alert(
        &self,
        alert_type: AlertType,
        severity: AlertSeverity,
        title: String,
        description: String,
        data: Option<HashMap<String, serde_json::Value>>,
    ) -> String {
        let alert_id = uuid::Uuid::new_v4().to_string();
        let alert = Alert {
            id: alert_id.clone(),
            alert_type,
            severity,
            title,
            description,
            timestamp: Utc::now(),
            status: AlertStatus::Active,
            data: data.unwrap_or_default(),
        };

        // Build the structured log fields and message before taking any lock,
        // so the alert can be moved into storage without an extra clone.
        let mut fields = HashMap::new();
        fields.insert("alert_id".to_string(), alert_id.clone().into());
        fields.insert(
            "alert_type".to_string(),
            format!("{:?}", alert.alert_type).into(),
        );
        fields.insert(
            "severity".to_string(),
            format!("{:?}", alert.severity).into(),
        );
        let log_message = format!("Alert created: {}", alert.title);

        // Register the alert; the scope ends (and the lock drops) before the
        // logging await below.
        {
            let mut alerts = self.alerts.write().await;
            alerts.push(alert);
        }

        self.log(LogLevel::Warn, log_message, Some(fields), None, None)
            .await;

        alert_id
    }

    /// Resolve an alert
    ///
    /// Marks the alert with the given ID as `Resolved` and logs the
    /// resolution at `Info` level.
    ///
    /// The alerts lock is released before `self.log(...).await` so the lock
    /// is never held across an await point (the previous version held the
    /// write guard for the whole call).
    ///
    /// # Errors
    /// Returns a not-found error if no alert has the given ID.
    pub async fn resolve_alert(&self, alert_id: &str) -> AppResult<()> {
        // Mutate under the lock, capture what logging needs, then drop the
        // guard at the end of this scope.
        let title = {
            let mut alerts = self.alerts.write().await;
            match alerts.iter_mut().find(|a| a.id == alert_id) {
                Some(alert) => {
                    alert.status = AlertStatus::Resolved;
                    alert.title.clone()
                }
                None => {
                    return Err(AppError::not_found(format!(
                        "Alert not found: {}",
                        alert_id
                    )))
                }
            }
        };

        let mut fields = HashMap::new();
        fields.insert("alert_id".to_string(), alert_id.into());
        self.log(
            LogLevel::Info,
            format!("Alert resolved: {}", title),
            Some(fields),
            None,
            None,
        )
        .await;

        Ok(())
    }

    /// Get active alerts
    ///
    /// Returns clones of all alerts whose status is `Active`.
    pub async fn get_active_alerts(&self) -> Vec<Alert> {
        let guard = self.alerts.read().await;
        let mut active = Vec::new();
        for alert in guard.iter() {
            if matches!(alert.status, AlertStatus::Active) {
                active.push(alert.clone());
            }
        }
        active
    }

    /// Get recent log entries
    ///
    /// Returns up to `limit` of the most recent entries, oldest first.
    pub async fn get_recent_logs(&self, limit: usize) -> Vec<LogEntry> {
        let entries = self.log_entries.read().await;
        // Skip everything except the trailing `limit` entries; saturating_sub
        // handles the case where fewer than `limit` entries exist.
        let skip = entries.len().saturating_sub(limit);
        entries.iter().skip(skip).cloned().collect()
    }

    /// Update health status for a component
    ///
    /// Inserts or replaces the record for `component_name`, then recomputes
    /// the overall status from all components and refreshes the uptime.
    pub async fn update_component_health(
        &self,
        component_name: String,
        status: HealthState,
        message: Option<String>,
        response_time_ms: Option<u64>,
    ) {
        // Build the record before taking the write lock.
        let component = ComponentHealth {
            name: component_name.clone(),
            status,
            message,
            last_checked: Utc::now(),
            response_time_ms,
        };

        let mut health = self.health_status.write().await;
        health.components.insert(component_name, component);

        // Re-derive the aggregate status and refresh uptime.
        health.status = Self::calculate_overall_health(&health.components);
        health.uptime_seconds = (Utc::now() - self.start_time).num_seconds() as u64;
    }

    /// Calculate overall health status
    ///
    /// Aggregates component statuses: any unhealthy component makes the
    /// system `Unhealthy`; otherwise any degraded component makes it
    /// `Degraded`; an empty component set (or all healthy) is `Healthy`.
    fn calculate_overall_health(components: &HashMap<String, ComponentHealth>) -> HealthState {
        if components
            .values()
            .any(|c| c.status == HealthState::Unhealthy)
        {
            return HealthState::Unhealthy;
        }

        if components
            .values()
            .any(|c| c.status == HealthState::Degraded)
        {
            return HealthState::Degraded;
        }

        HealthState::Healthy
    }

    /// Get current health status
    ///
    /// Returns a clone of the latest health snapshot.
    pub async fn get_health_status(&self) -> HealthStatus {
        let guard = self.health_status.read().await;
        guard.clone()
    }

    /// Perform health checks
    ///
    /// Inspects the performance summary, raises warning alerts for elevated
    /// error rate (>5%) or average response time (>5000 ms), and updates the
    /// "system" component: degraded above the warning thresholds, unhealthy
    /// above 10% error rate or 10000 ms response time.
    pub async fn perform_health_checks(&self) -> AppResult<()> {
        // The previous version also fetched `get_metrics()` but never used
        // the result; only the summary drives the checks below.
        let summary = self.performance_manager.get_performance_summary().await;

        // Check error rate
        if summary.error_rate > 5.0 {
            self.create_alert(
                AlertType::ErrorRateHigh,
                AlertSeverity::Warning,
                "High error rate detected".to_string(),
                format!("Error rate is {:.2}%", summary.error_rate),
                Some({
                    let mut data = HashMap::new();
                    data.insert("error_rate".to_string(), summary.error_rate.into());
                    data.insert("total_requests".to_string(), summary.total_requests.into());
                    data.insert("total_errors".to_string(), summary.total_errors.into());
                    data
                }),
            )
            .await;
        }

        // Check response time
        if summary.avg_response_time_ms > 5000.0 {
            self.create_alert(
                AlertType::ResponseTimeHigh,
                AlertSeverity::Warning,
                "High response time detected".to_string(),
                format!(
                    "Average response time is {:.2}ms",
                    summary.avg_response_time_ms
                ),
                Some({
                    let mut data = HashMap::new();
                    data.insert(
                        "avg_response_time_ms".to_string(),
                        summary.avg_response_time_ms.into(),
                    );
                    data
                }),
            )
            .await;
        }

        // Derive overall system health from the same signals, with stricter
        // thresholds for "unhealthy" than for the warning alerts above.
        let overall_health = if summary.error_rate > 10.0 || summary.avg_response_time_ms > 10000.0
        {
            HealthState::Unhealthy
        } else if summary.error_rate > 5.0 || summary.avg_response_time_ms > 5000.0 {
            HealthState::Degraded
        } else {
            HealthState::Healthy
        };

        self.update_component_health(
            "system".to_string(),
            overall_health,
            Some(format!(
                "Error rate: {:.2}%, Response time: {:.2}ms",
                summary.error_rate, summary.avg_response_time_ms
            )),
            Some(summary.avg_response_time_ms as u64),
        )
        .await;

        Ok(())
    }

    /// Start monitoring loop
    ///
    /// Spawns a background task that, every `metrics_interval_seconds`,
    /// refreshes uptime/timestamp, runs the inner health checks, and prunes
    /// old non-active alerts (resolved kept 1 hour, suppressed kept 24
    /// hours). A no-op when metrics are disabled in the configuration.
    pub async fn start_monitoring(&self) -> AppResult<()> {
        if !self.config.enable_metrics {
            return Ok(());
        }

        let interval_seconds = self.config.metrics_interval_seconds;
        let health_status = self.health_status.clone();
        let alerts = self.alerts.clone();
        let performance_manager = self.performance_manager.clone();
        // Capture the application start time so uptime is cumulative.
        // BUG FIX: the old loop measured uptime against `health.timestamp`,
        // which it reset every tick — so the reported uptime was always
        // roughly one interval instead of time since start.
        let start_time = self.start_time;

        tokio::spawn(async move {
            let mut interval =
                tokio::time::interval(tokio::time::Duration::from_secs(interval_seconds));

            loop {
                interval.tick().await;

                // Refresh uptime (relative to process start) and timestamp.
                {
                    let mut health = health_status.write().await;
                    health.uptime_seconds = (Utc::now() - start_time).num_seconds() as u64;
                    health.timestamp = Utc::now();
                }

                // Perform health checks
                if let Err(e) =
                    Self::perform_health_checks_inner(&health_status, &alerts, &performance_manager)
                        .await
                {
                    error!("Health check failed: {}", e);
                }

                // Cleanup old non-active alerts; age is measured from the
                // alert's creation timestamp.
                {
                    let mut alerts = alerts.write().await;
                    alerts.retain(|a| {
                        let age_seconds =
                            Utc::now().signed_duration_since(a.timestamp).num_seconds();
                        match a.status {
                            // Keep resolved alerts for 1 hour
                            AlertStatus::Resolved => age_seconds < 3600,
                            // Keep suppressed alerts for 24 hours
                            AlertStatus::Suppressed => age_seconds < 86400,
                            AlertStatus::Active => true,
                        }
                    });
                }
            }
        });

        info!(
            "Monitoring started with {} second interval",
            interval_seconds
        );
        Ok(())
    }

    /// Inner health check function used by the background monitoring task.
    ///
    /// Placeholder: real checks (querying `_performance_manager` and raising
    /// alerts into `_alerts`) are not implemented yet; for now it only
    /// refreshes the health timestamp. The unused parameters are
    /// underscore-prefixed to document intent and silence warnings while
    /// keeping the signature ready for the full implementation.
    async fn perform_health_checks_inner(
        health_status: &Arc<RwLock<HealthStatus>>,
        _alerts: &Arc<RwLock<Vec<Alert>>>,
        _performance_manager: &Arc<PerformanceManager>,
    ) -> Result<()> {
        let mut health = health_status.write().await;
        health.timestamp = Utc::now();

        Ok(())
    }

    /// Get monitoring dashboard data
    ///
    /// Gathers a point-in-time snapshot for the dashboard: health status,
    /// active alerts, the last 100 log entries, and the performance
    /// metrics/summary.
    pub async fn get_dashboard_data(&self) -> DashboardData {
        DashboardData {
            health: self.get_health_status().await,
            active_alerts: self.get_active_alerts().await,
            recent_logs: self.get_recent_logs(100).await,
            metrics: self.performance_manager.get_metrics().await,
            summary: self.performance_manager.get_performance_summary().await,
        }
    }
}

/// Dashboard data structure
///
/// Point-in-time snapshot assembled by `MonitoringManager::get_dashboard_data`
/// for serving to a monitoring UI.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DashboardData {
    /// Current health status snapshot
    pub health: HealthStatus,
    /// Alerts currently in the `Active` state
    pub active_alerts: Vec<Alert>,
    /// Most recent log entries (up to 100)
    pub recent_logs: Vec<LogEntry>,
    /// Performance metrics
    pub metrics: crate::performance::PerformanceMetrics,
    /// Performance summary
    pub summary: crate::performance::PerformanceSummary,
}

/// Macro for convenient logging
///
/// Expands to a call to `MonitoringManager::log(...)` followed by `.await`,
/// so it may only be used inside an `async` context. The second form collects
/// trailing `key = value` pairs into structured fields via
/// `serde_json::json!`, e.g.:
/// `log_event!(monitoring, LogLevel::Info, "started", port = 8080)`.
#[macro_export]
macro_rules! log_event {
    // Message-only form: no structured fields, no request/user IDs.
    ($monitoring:expr, $level:expr, $message:expr) => {
        $monitoring.log($level, $message.to_string(), None, None, None).await
    };
    // Form with `field = value` pairs gathered into a HashMap of JSON values.
    ($monitoring:expr, $level:expr, $message:expr, $($field:ident = $value:expr),*) => {
        {
            let mut fields = std::collections::HashMap::new();
            $(
                fields.insert(stringify!($field).to_string(), serde_json::json!($value));
            )*
            $monitoring.log($level, $message.to_string(), Some(fields), None, None).await
        }
    };
}

#[cfg(test)]
mod tests {
    use super::*;
    // FIX: `AppConfig` was referenced below but never brought into scope
    // (`use super::*` only re-exports what this module imports), which fails
    // to compile. Assumed to live in `crate::config` alongside
    // `LoggingConfig` — adjust the path if it lives elsewhere.
    use crate::config::AppConfig;

    /// Logging stores entries in the in-memory buffer and serves them back.
    #[tokio::test]
    async fn test_logging() {
        let config = LoggingConfig::default();
        let perf_manager = Arc::new(PerformanceManager::new(AppConfig::default()));
        let monitoring = MonitoringManager::new(config, perf_manager).unwrap();

        monitoring
            .log(LogLevel::Info, "Test message".to_string(), None, None, None)
            .await;

        let logs = monitoring.get_recent_logs(10).await;
        assert!(!logs.is_empty());
        assert_eq!(logs.last().unwrap().message, "Test message");
    }

    /// Created alerts are active until resolved, then disappear from the
    /// active list.
    #[tokio::test]
    async fn test_alerts() {
        let config = LoggingConfig::default();
        let perf_manager = Arc::new(PerformanceManager::new(AppConfig::default()));
        let monitoring = MonitoringManager::new(config, perf_manager).unwrap();

        let alert_id = monitoring
            .create_alert(
                AlertType::ErrorRateHigh,
                AlertSeverity::Warning,
                "Test Alert".to_string(),
                "This is a test alert".to_string(),
                None,
            )
            .await;

        let alerts = monitoring.get_active_alerts().await;
        assert_eq!(alerts.len(), 1);
        assert_eq!(alerts[0].id, alert_id);

        monitoring.resolve_alert(&alert_id).await.unwrap();
        let active_alerts = monitoring.get_active_alerts().await;
        assert!(active_alerts.is_empty());
    }

    /// Updating a healthy component keeps the overall status healthy and
    /// registers the component.
    #[tokio::test]
    async fn test_health_status() {
        let config = LoggingConfig::default();
        let perf_manager = Arc::new(PerformanceManager::new(AppConfig::default()));
        let monitoring = MonitoringManager::new(config, perf_manager).unwrap();

        monitoring
            .update_component_health(
                "test_component".to_string(),
                HealthState::Healthy,
                Some("All good".to_string()),
                Some(100),
            )
            .await;

        let health = monitoring.get_health_status().await;
        assert_eq!(health.status, HealthState::Healthy);
        assert!(health.components.contains_key("test_component"));
    }
}
