use crate::consumer::{ConsumeLogs, ConsumeMetrics, ConsumeTraces};
use crate::pipeline::{LogsProcessor, MetricsProcessor, TracesProcessor};
use pdata::internal::data::protogen::collector::metrics::v1::ExportMetricsServiceRequest;
use pdata::internal::data::protogen::collector::trace::v1::ExportTraceServiceRequest;
use pdata::internal::data::protogen::collector::logs::v1::ExportLogsServiceRequest;
use serde::Deserialize;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use tokio::time;
use tracing::info;

/// Configuration for the memory limiter processor.
#[derive(Debug, Clone, Deserialize)]
pub struct MemoryLimiterConfig {
    // Seconds between memory-usage samples taken by the background task.
    pub check_interval: u64,
    // Hard limit on process resident memory, in MiB; data is dropped while exceeded.
    pub limit_mib: u64,
    // Maximum allowed growth between two consecutive samples, in MiB.
    pub spike_limit_mib: u64,
}

impl Default for MemoryLimiterConfig {
    fn default() -> Self {
        Self {
            check_interval: 1,
            limit_mib: 100,
            spike_limit_mib: 20,
        }
    }
}

/// Memory limiter processor that monitors system memory usage and drops data when limits are exceeded.
pub struct MemoryLimiter {
    // Limits and check interval used by the background monitoring task.
    config: MemoryLimiterConfig,
    // Set by the background task when the absolute or spike limit is exceeded.
    memory_exceeded: Arc<std::sync::atomic::AtomicBool>,
    // Human-readable reason for the most recent violation, if any.
    latest_error: Arc<RwLock<Option<String>>>,
    // NOTE(review): dropping the last Arc does not abort the spawned task —
    // tokio JoinHandles detach on drop, so the monitoring loop keeps running
    // for the lifetime of the runtime. Consider an abort-on-drop guard if
    // limiter lifetime should bound the task.
    _task_handle: Arc<tokio::task::JoinHandle<()>>,
}

impl MemoryLimiter {
    /// Create a new memory limiter with the given configuration.
    ///
    /// Spawns a background task that samples this process's resident memory
    /// every `check_interval` seconds and records whether either the absolute
    /// limit (`limit_mib`) or the growth-since-last-sample limit
    /// (`spike_limit_mib`) is currently exceeded.
    ///
    /// A `check_interval` of 0 is clamped to 1 second, because
    /// `tokio::time::interval` panics on a zero period.
    pub fn new(config: MemoryLimiterConfig) -> Self {
        use std::sync::atomic::{AtomicBool, Ordering};

        // Clamp to avoid `time::interval` panicking on a zero period.
        let check_interval = config.check_interval.max(1);
        let memory_exceeded = Arc::new(AtomicBool::new(false));
        let last_error = Arc::new(RwLock::new(None));
        let memory_exceeded_clone = memory_exceeded.clone();
        let last_error_clone = last_error.clone();

        let task_handle = Arc::new(tokio::spawn(async move {
            let mut interval = time::interval(Duration::from_secs(check_interval));
            // Previous sample in MiB. 0 means "no sample yet", so the spike
            // check is skipped on the first tick. A plain local suffices: this
            // state is only touched by this one task.
            let mut previous_memory_usage: u64 = 0;

            loop {
                interval.tick().await;

                if let Ok(current_memory_usage) = Self::get_memory_usage() {
                    let mut exceeded = false;
                    let mut error_msg = None;

                    info!("current_memory_usage: {}", current_memory_usage);

                    // Condition 1: absolute limit exceeded.
                    if current_memory_usage > config.limit_mib {
                        exceeded = true;
                        error_msg = Some(format!(
                            "Memory limit exceeded: {} MiB (threshold: {} MiB)",
                            current_memory_usage, config.limit_mib
                        ));
                    }

                    // Condition 2: growth since the previous sample exceeds the
                    // spike limit. saturating_sub makes shrinkage read as 0.
                    if previous_memory_usage > 0 {
                        let spike = current_memory_usage.saturating_sub(previous_memory_usage);
                        if spike > config.spike_limit_mib {
                            exceeded = true;
                            // Intentionally overwrites the absolute-limit
                            // message: only the most recent reason is surfaced.
                            error_msg = Some(format!(
                                "Memory spike detected: +{} MiB (threshold: {} MiB)",
                                spike, config.spike_limit_mib
                            ));
                        }
                    }

                    memory_exceeded_clone.store(exceeded, Ordering::Relaxed);
                    *last_error_clone.write().unwrap() = error_msg;
                    previous_memory_usage = current_memory_usage;
                }
                // A failed read keeps the previous exceeded/error state until
                // the next successful sample.
            }
        }));

        Self {
            config,
            memory_exceeded,
            latest_error: last_error,
            _task_handle: task_handle,
        }
    }

    /// Return this process's resident memory usage in MiB.
    ///
    /// Linux-only: reads VmRSS from `/proc/self/status`, falling back to
    /// `/proc/self/statm`. On other platforms both reads fail and an error
    /// is returned.
    fn get_memory_usage() -> Result<u64, Box<dyn std::error::Error>> {
        use std::fs;

        // Preferred source: VmRSS (resident set size) from /proc/self/status.
        if let Ok(status_content) = fs::read_to_string("/proc/self/status") {
            for line in status_content.lines() {
                if line.starts_with("VmRSS:") {
                    if let Some(kb_str) = line.split_whitespace().nth(1) {
                        if let Ok(kb) = kb_str.parse::<u64>() {
                            return Ok(kb / 1024); // KiB -> MiB
                        }
                    }
                }
            }
        }

        // Fallback: /proc/self/statm. Format: size resident share text lib data dt
        if let Ok(statm_content) = fs::read_to_string("/proc/self/statm") {
            if let Some(resident_str) = statm_content.split_whitespace().nth(1) {
                if let Ok(pages) = resident_str.parse::<u64>() {
                    // NOTE(review): assumes a 4 KiB page size; on systems with
                    // a different page size this misreports usage.
                    let kb = pages * 4;
                    return Ok(kb / 1024); // KiB -> MiB
                }
            }
        }

        Err("Unable to read process memory information".into())
    }

    /// True when the most recent check found a limit exceeded; incoming
    /// batches should be refused (dropped) while this holds.
    fn must_refuse(&self) -> bool {
        self.memory_exceeded
            .load(std::sync::atomic::Ordering::Relaxed)
    }

    /// The human-readable reason recorded by the most recent check, if any.
    fn get_last_error(&self) -> Option<String> {
        self.latest_error.read().unwrap().clone()
    }
}

impl Clone for MemoryLimiter {
    /// Clones share the monitoring state and the background task: every
    /// clone observes the same memory-exceeded flag and error message, and
    /// the task stays alive as long as any clone holds its handle.
    fn clone(&self) -> Self {
        let config = self.config.clone();
        let memory_exceeded = Arc::clone(&self.memory_exceeded);
        let latest_error = Arc::clone(&self.latest_error);
        let _task_handle = Arc::clone(&self._task_handle);
        Self {
            config,
            memory_exceeded,
            latest_error,
            _task_handle,
        }
    }
}

// Traces
// Trace consumer that drops batches while the limiter reports memory pressure.
struct MemoryLimitedTracesConsumer {
    limiter: Arc<MemoryLimiter>,
    next: Arc<dyn ConsumeTraces + Send + Sync + 'static>,
}

impl TracesProcessor for MemoryLimiter {
    /// Wrap `next` so trace batches are dropped while memory limits are exceeded.
    fn wrap(
        &self,
        next: Arc<dyn ConsumeTraces + Send + Sync + 'static>,
    ) -> Arc<dyn ConsumeTraces + Send + Sync + 'static> {
        let limiter = Arc::new(self.clone());
        Arc::new(MemoryLimitedTracesConsumer { limiter, next })
    }
}

#[tonic::async_trait]
impl ConsumeTraces for MemoryLimitedTracesConsumer {
    /// Forward the batch downstream unless the limiter is currently refusing
    /// data, in which case the batch is silently dropped (with a log line).
    async fn consume(&self, data: ExportTraceServiceRequest) {
        if !self.limiter.must_refuse() {
            self.next.consume(data).await;
            return;
        }
        if let Some(error_msg) = self.limiter.get_last_error() {
            eprintln!("{}, dropping traces batch", error_msg);
        }
    }
}

// Metrics
// Metrics consumer that drops batches while the limiter reports memory pressure.
struct MemoryLimitedMetricsConsumer{
    limiter: Arc<MemoryLimiter>,
    next: Arc<dyn ConsumeMetrics + Send + Sync + 'static>,
}

impl MetricsProcessor for MemoryLimiter {
    /// Wrap `next` so metric batches are dropped while memory limits are exceeded.
    fn wrap(
        &self,
        next: Arc<dyn ConsumeMetrics + Send + Sync + 'static>,
    ) -> Arc<dyn ConsumeMetrics + Send + Sync + 'static> {
        let limiter = Arc::new(self.clone());
        Arc::new(MemoryLimitedMetricsConsumer { limiter, next })
    }
}

#[tonic::async_trait]
impl ConsumeMetrics for MemoryLimitedMetricsConsumer {
    /// Forward the batch downstream unless the limiter is currently refusing
    /// data, in which case the batch is silently dropped (with a log line).
    async fn consume(&self, data: ExportMetricsServiceRequest) {
        if !self.limiter.must_refuse() {
            self.next.consume(data).await;
            return;
        }
        if let Some(error_msg) = self.limiter.get_last_error() {
            eprintln!("{}, dropping metrics batch", error_msg);
        }
    }
}

// Logs
// Logs consumer that drops batches while the limiter reports memory pressure.
struct MemoryLimitedLogsConsumer {
    limiter: Arc<MemoryLimiter>,
    next: Arc<dyn ConsumeLogs + Send + Sync + 'static>,
}

impl LogsProcessor for MemoryLimiter {
    /// Wrap `next` so log batches are dropped while memory limits are exceeded.
    fn wrap(
        &self,
        next: Arc<dyn ConsumeLogs + Send + Sync + 'static>,
    ) -> Arc<dyn ConsumeLogs + Send + Sync + 'static> {
        let limiter = Arc::new(self.clone());
        Arc::new(MemoryLimitedLogsConsumer { limiter, next })
    }
}

#[tonic::async_trait]
impl ConsumeLogs for MemoryLimitedLogsConsumer {
    /// Forward the batch downstream unless the limiter is currently refusing
    /// data, in which case the batch is silently dropped (with a log line).
    async fn consume(&self, data: ExportLogsServiceRequest) {
        if !self.limiter.must_refuse() {
            self.next.consume(data).await;
            return;
        }
        if let Some(error_msg) = self.limiter.get_last_error() {
            eprintln!("{}, dropping logs batch", error_msg);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::time::Duration;
    use tokio::time;

    #[tokio::test]
    async fn test_memory_limiter_basic_functionality() {
        let config = MemoryLimiterConfig {
            check_interval: 1,
            limit_mib: 10000, // large limits so nothing triggers
            spike_limit_mib: 1000,
        };

        let limiter = MemoryLimiter::new(config);

        assert!(!limiter.must_refuse());

        time::sleep(Duration::from_millis(200)).await;

        assert!(!limiter.must_refuse());
    }

    #[tokio::test]
    async fn test_memory_limiter_error_message() {
        let config = MemoryLimiterConfig {
            check_interval: 1,
            limit_mib: 10000, // large limits so nothing triggers
            spike_limit_mib: 1000,
        };

        let limiter = MemoryLimiter::new(config);

        assert!(limiter.get_last_error().is_none());

        time::sleep(Duration::from_millis(200)).await;

        let _ = limiter.get_last_error();
    }

    #[tokio::test]
    async fn test_memory_limiter_clone() {
        let config = MemoryLimiterConfig {
            check_interval: 1,
            limit_mib: 100,
            spike_limit_mib: 10,
        };

        let limiter1 = MemoryLimiter::new(config.clone());
        let limiter2 = limiter1.clone();

        // Both instances carry the same configuration.
        assert_eq!(
            limiter1.config.check_interval,
            limiter2.config.check_interval
        );
        assert_eq!(limiter1.config.limit_mib, limiter2.config.limit_mib);
        assert_eq!(
            limiter1.config.spike_limit_mib,
            limiter2.config.spike_limit_mib
        );

        // Clones deliberately SHARE the monitoring state (see `impl Clone for
        // MemoryLimiter`, which uses `Arc::clone`), so every wrapped consumer
        // observes the same flag. The previous assertion expected independent
        // state (`!Arc::ptr_eq`), which contradicts the Clone implementation
        // and always failed.
        assert!(Arc::ptr_eq(
            &limiter1.memory_exceeded,
            &limiter2.memory_exceeded
        ));
        assert!(Arc::ptr_eq(&limiter1.latest_error, &limiter2.latest_error));
    }

    #[test]
    fn test_memory_limiter_config_clone() {
        let config = MemoryLimiterConfig {
            check_interval: 5,
            limit_mib: 500,
            spike_limit_mib: 100,
        };

        let config_clone = config.clone();

        assert_eq!(config.check_interval, config_clone.check_interval);
        assert_eq!(config.limit_mib, config_clone.limit_mib);
        assert_eq!(config.spike_limit_mib, config_clone.spike_limit_mib);
    }

    #[tokio::test]
    async fn test_memory_limiter_memory_exceeded_reaction() {
        // Tiny limits so that real process memory usage should trip them.
        let config = MemoryLimiterConfig {
            check_interval: 2, // 2-second check interval
            limit_mib: 1,      // very small absolute limit, 1 MiB
            spike_limit_mib: 1,
        };

        let limiter = MemoryLimiter::new(config);

        // Initial state: nothing exceeded yet.
        assert!(!limiter.must_refuse(), "初始状态不应该超限");
        assert!(limiter.get_last_error().is_none(), "初始状态不应该有错误");

        // Give the monitoring task a chance to sample. With such small limits
        // actual memory usage should trigger, but timing is not guaranteed,
        // so the checks below only verify the API stays callable.
        time::sleep(Duration::from_millis(500)).await;

        println!(
            "当前内存使用情况: {}",
            MemoryLimiter::get_memory_usage().unwrap_or(0)
        );

        // Exercise the accessors; they must not panic regardless of whether
        // the limit actually tripped in this environment.
        let should_refuse = limiter.must_refuse();
        let last_error = limiter.get_last_error();

        println!("是否应该拒绝请求: {}", should_refuse);
        println!("最后错误信息: {:?}", last_error);

        // Explicitly drop to exercise cleanup paths.
        drop(limiter);
    }

    #[test]
    fn test_get_memory_usage() {
        match MemoryLimiter::get_memory_usage() {
            Ok(memory_mib) => {
                println!("Current process memory usage: {} MiB", memory_mib);
                // Sanity-check the value: positive and below an absurd bound.
                assert!(memory_mib > 0, "Memory usage should be greater than 0");
                assert!(
                    memory_mib < 100000,
                    "Memory usage should be less than 100GB (unreasonable)"
                );
            }
            Err(e) => {
                panic!("Failed to get memory usage: {}", e);
            }
        }
    }
}
