use crate::config::MonitoringConfig;
use crate::Result;

use metrics::Unit; // Using metrics macros only
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tracing::{error, info};

/// Metrics collector.
///
/// Responsibilities:
/// 1. Aggregates Raft, node runtime-status, performance, and health metrics for this cluster.
/// 2. Keeps the latest snapshot in an in-memory structure for the API / other modules to query.
/// 3. (Optional) registers Prometheus metric descriptions; can later be extended to expose an
///    HTTP /metrics endpoint.
///
/// Thread/async safety:
/// - Shared metric data is guarded by `Arc<RwLock<ClusterMetrics>>`, which performs well in
///   read-heavy, write-light workloads.
/// - `shutdown_tx` is used to gracefully stop the background periodic-collection task.
///
/// Extension points:
/// - `start_prometheus_server` is currently unimplemented; crates such as
///   `metrics-exporter-prometheus` can be integrated there.
/// - System-level metrics (CPU / memory / IO, etc.) can be added to the periodic task.
pub struct MetricsCollector {
    // Retained for future use (e.g. exporter setup); leading underscore silences unused warnings.
    _config: MonitoringConfig,
    // Latest aggregated snapshot; read via `get_metrics`, written by the update_* methods.
    metrics: Arc<RwLock<ClusterMetrics>>,
    // oneshot sender that signals the background collection loop to exit; `None` when not running.
    shutdown_tx: Arc<RwLock<Option<tokio::sync::oneshot::Sender<()>>>>,
}

/// Cluster metrics data structure
///
/// Top-level snapshot aggregating every metric category; cloned out to callers via
/// `MetricsCollector::get_metrics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClusterMetrics {
    pub cluster_info: ClusterInfo,
    pub raft_metrics: RaftMetrics,
    /// Per-node metrics keyed by node ID.
    pub node_metrics: HashMap<String, NodeMetrics>,
    pub performance_metrics: PerformanceMetrics,
    pub health_metrics: HealthMetrics,
}

/// Basic cluster identity and membership information.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClusterInfo {
    /// Unique cluster identifier.
    pub cluster_id: String,
    /// Current leader node ID; `None` when unknown or no leader is elected.
    pub leader_id: Option<u64>,
    /// Number of Raft members (nodes participating in consensus).
    pub member_count: usize,
    /// Number of managed business nodes (may exceed Raft membership).
    pub managed_node_count: usize,
    /// Seconds since collection started; advanced by the periodic collection task.
    pub uptime_seconds: u64,
}

/// Raft consensus state counters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RaftMetrics {
    /// Current Raft term.
    pub current_term: u64,
    /// Highest committed log index.
    pub commit_index: u64,
    /// Highest log index applied to the state machine.
    pub last_applied: u64,
    /// Total number of log entries.
    pub log_entries: u64,
    /// Log index covered by the latest snapshot; `None` when no snapshot exists.
    pub snapshot_index: Option<u64>,
    /// Count of leadership changes. NOTE(review): initialized to 0 but never
    /// updated anywhere in this file — verify the intended writer exists.
    pub leadership_changes: u64,
}

/// Runtime metrics for a single managed node.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeMetrics {
    /// Node identifier; also used as the key in `ClusterMetrics::node_metrics`.
    pub node_id: String,
    /// Free-form status string (e.g. health state) supplied by the caller.
    pub status: String,
    /// Timestamp (UTC) of the last time this node was observed.
    pub last_seen: chrono::DateTime<chrono::Utc>,
    /// Most recent measured response time, in milliseconds.
    pub response_time_ms: f64,
    /// Cumulative health-check failure count.
    pub health_check_failures: u64,
    /// Node availability as a percentage.
    pub uptime_percentage: f64,
}

/// Aggregate performance metrics; statistics windows are determined by the caller.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceMetrics {
    /// Requests handled per second.
    pub requests_per_second: f64,
    /// Mean request latency, in milliseconds.
    pub average_latency_ms: f64,
    /// 95th percentile latency, in milliseconds.
    pub p95_latency_ms: f64,
    /// 99th percentile latency, in milliseconds.
    pub p99_latency_ms: f64,
    /// Error rate (percentage, per the gauge description below).
    pub error_rate: f64,
    /// Throughput in bytes per second.
    pub throughput_bytes_per_second: f64,
}

/// Aggregate node-health statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthMetrics {
    /// Number of nodes currently healthy.
    pub healthy_nodes: usize,
    /// Number of nodes currently unhealthy.
    pub unhealthy_nodes: usize,
    /// Number of nodes in maintenance mode.
    pub maintenance_nodes: usize,
    /// Number of nodes whose status is unknown.
    pub unknown_nodes: usize,
    /// Cumulative count of health checks performed.
    pub total_health_checks: u64,
    /// Cumulative count of failed health checks.
    pub failed_health_checks: u64,
}

impl MetricsCollector {
    /// Creates a new `MetricsCollector` instance.
    ///
    /// Parameters:
    /// - `config`: monitoring configuration; decides whether Prometheus metric
    ///   registration is performed, among other things.
    ///
    /// Behavior:
    /// - When Prometheus is enabled, `register_metrics` describes all custom
    ///   metrics up front (for self-description / display purposes).
    /// - Every metric category starts at its default (0 or `None`) so later reads
    ///   never observe missing fields.
    ///
    /// Returns:
    /// - Ok: the assembled `MetricsCollector`.
    /// - Err: never, in the current implementation (the `Result` is reserved for
    ///   future initialization logic).
    pub fn new(config: &MonitoringConfig) -> Result<Self> {
        info!("Initializing metrics collector");

        // Describe custom metrics only when the Prometheus side is switched on.
        // TODO: integrate a metrics exporter (e.g., prometheus exporter crate). For now just register metrics.
        if config.prometheus.enabled {
            Self::register_metrics();
        }

        let cluster_info = ClusterInfo {
            cluster_id: "default-cluster".to_string(),
            leader_id: None,
            member_count: 0,
            managed_node_count: 0,
            uptime_seconds: 0,
        };

        let raft_metrics = RaftMetrics {
            current_term: 0,
            commit_index: 0,
            last_applied: 0,
            log_entries: 0,
            snapshot_index: None,
            leadership_changes: 0,
        };

        let performance_metrics = PerformanceMetrics {
            requests_per_second: 0.0,
            average_latency_ms: 0.0,
            p95_latency_ms: 0.0,
            p99_latency_ms: 0.0,
            error_rate: 0.0,
            throughput_bytes_per_second: 0.0,
        };

        let health_metrics = HealthMetrics {
            healthy_nodes: 0,
            unhealthy_nodes: 0,
            maintenance_nodes: 0,
            unknown_nodes: 0,
            total_health_checks: 0,
            failed_health_checks: 0,
        };

        let initial = ClusterMetrics {
            cluster_info,
            raft_metrics,
            node_metrics: HashMap::new(),
            performance_metrics,
            health_metrics,
        };

        Ok(Self {
            _config: config.clone(),
            metrics: Arc::new(RwLock::new(initial)),
            shutdown_tx: Arc::new(RwLock::new(None)),
        })
    }

    /// Starts the background metrics-collection task.
    ///
    /// Behavior:
    /// - Creates a oneshot channel used later to trigger shutdown.
    /// - Spawns a Tokio task that runs `collect_metrics` every 10 seconds.
    /// - Currently only updates uptime; system-level metrics can be added later.
    ///
    /// Idempotency:
    /// - If the collector is already running (a shutdown sender is present), this
    ///   call is a no-op. Previously a second `start` overwrote the old sender,
    ///   leaking a background loop that could never be stopped.
    ///
    /// Notes:
    /// - The Prometheus HTTP endpoint is not implemented yet; only metric
    ///   descriptions and in-memory accumulation happen for now.
    ///
    /// Returns: `Result<()>` indicating logical success/failure.
    pub async fn start(&self) -> Result<()> {
        info!("Starting metrics collector");

        let (tx, rx) = tokio::sync::oneshot::channel();
        {
            let mut guard = self.shutdown_tx.write().await;
            if guard.is_some() {
                // Already running: keep the existing loop and its shutdown handle.
                info!("Metrics collector already started; ignoring duplicate start");
                return Ok(());
            }
            *guard = Some(tx);
        }

        // Prometheus HTTP endpoint not yet implemented (exporter placeholder)

        // Start metrics collection loop
        let metrics = self.metrics.clone();
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(10));
            // `interval`'s first tick completes immediately; consume it so uptime
            // is not advanced by 10s at startup before any time has elapsed.
            interval.tick().await;
            let mut shutdown = rx;

            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        if let Err(e) = Self::collect_metrics(&metrics).await {
                            error!("Failed to collect metrics: {}", e);
                        }
                    }
                    _ = &mut shutdown => {
                        info!("Metrics collector shutting down");
                        break;
                    }
                }
            }
        });

        Ok(())
    }

    /// Stops the background metrics-collection task.
    ///
    /// Behavior:
    /// - Sends the oneshot signal telling the background loop to exit.
    /// - If the sender has already been taken (never started, or shut down
    ///   before), the call silently returns.
    ///
    /// Idempotency:
    /// - Safe to call multiple times without panicking; only the first call
    ///   actually delivers the shutdown signal.
    pub async fn shutdown(&self) -> Result<()> {
        info!("Shutting down metrics collector");
        let sender = self.shutdown_tx.write().await.take();
        if let Some(tx) = sender {
            // The receiver may already be gone; the send result is irrelevant.
            let _ = tx.send(());
        }

        Ok(())
    }

    /// Returns a snapshot of all currently aggregated metrics.
    ///
    /// Implementation details:
    /// - Read lock + clone, so the write lock is never held for long here.
    /// - The returned value is a point-in-time copy and does not update itself.
    ///
    /// Use cases:
    /// - Serialized output for external APIs.
    /// - Test verification.
    pub async fn get_metrics(&self) -> ClusterMetrics {
        let snapshot = self.metrics.read().await;
        snapshot.clone()
    }

    /// Updates basic cluster-level information.
    ///
    /// Parameters:
    /// - `cluster_id`: unique cluster identifier.
    /// - `leader_id`: current leader node ID; `None` means unknown or not elected.
    /// - `member_count`: number of Raft members (nodes participating in consensus).
    /// - `managed_node_count`: number of managed business nodes (may exceed Raft
    ///   membership, i.e. proxied/managed targets).
    ///
    /// Behavior:
    /// - Writes the corresponding fields into the in-memory model.
    /// - Mirrors them to Prometheus metrics (cluster_members / cluster_managed_nodes
    ///   / cluster_leader_id).
    ///
    /// Note:
    /// - When `leader_id` is `None` the gauge is not written, so the previous value
    ///   may linger (could be changed to a reset in the future if required).
    pub async fn update_cluster_info(
        &self,
        cluster_id: &str,
        leader_id: Option<u64>,
        member_count: usize,
        managed_node_count: usize,
    ) -> Result<()> {
        let mut guard = self.metrics.write().await;
        let info = &mut guard.cluster_info;
        info.cluster_id = cluster_id.to_string();
        info.leader_id = leader_id;
        info.member_count = member_count;
        info.managed_node_count = managed_node_count;

        // Mirror to Prometheus gauges.
        metrics::gauge!("cluster_members").set(member_count as f64);
        metrics::gauge!("cluster_managed_nodes").set(managed_node_count as f64);
        if let Some(leader) = leader_id {
            metrics::gauge!("cluster_leader_id").set(leader as f64);
        }

        Ok(())
    }

    /// Updates Raft-related metrics.
    ///
    /// Parameters:
    /// - `current_term`: current Raft term.
    /// - `commit_index`: highest committed log index.
    /// - `last_applied`: highest log index applied to the state machine.
    /// - `log_entries`: total number of log entries (useful for growth tracking).
    /// - `snapshot_index`: log index covered by the latest snapshot (`None` means
    ///   not yet generated or unknown).
    ///
    /// Behavior:
    /// - Updates the internal structure.
    /// - Mirrors the values to Prometheus gauges.
    /// - The snapshot gauge is only written when `snapshot_index` is `Some`, so no
    ///   empty value appears while there is no snapshot.
    pub async fn update_raft_metrics(
        &self,
        current_term: u64,
        commit_index: u64,
        last_applied: u64,
        log_entries: u64,
        snapshot_index: Option<u64>,
    ) -> Result<()> {
        let mut guard = self.metrics.write().await;
        let raft = &mut guard.raft_metrics;
        raft.current_term = current_term;
        raft.commit_index = commit_index;
        raft.last_applied = last_applied;
        raft.log_entries = log_entries;
        raft.snapshot_index = snapshot_index;

        // Mirror to Prometheus gauges.
        metrics::gauge!("raft_current_term").set(current_term as f64);
        metrics::gauge!("raft_commit_index").set(commit_index as f64);
        metrics::gauge!("raft_last_applied").set(last_applied as f64);
        metrics::gauge!("raft_log_entries").set(log_entries as f64);
        if let Some(snapshot_idx) = snapshot_index {
            metrics::gauge!("raft_snapshot_index").set(snapshot_idx as f64);
        }

        Ok(())
    }

    /// Inserts or replaces the runtime metrics of a single node.
    ///
    /// Parameters:
    /// - `node_metrics`: per-node metrics (status, response time, health-check
    ///   failure count, etc.).
    ///
    /// Behavior:
    /// - Upserts into the `HashMap` (replacing any existing entry for the node).
    /// - Response time / availability are written to gauges; the health-check
    ///   failure count is written to a counter via `absolute` (set current value).
    ///
    /// Note:
    /// - No dynamic labels (such as node_id) are used here, to keep the demo
    ///   simple; the metrics crate's recorder could add label dimensions later.
    pub async fn update_node_metrics(&self, node_metrics: NodeMetrics) -> Result<()> {
        // Copy the scalar fields up front so the whole struct can be moved into
        // the map instead of being cloned (the previous version cloned its
        // String/DateTime fields needlessly).
        let response_time_ms = node_metrics.response_time_ms;
        let uptime_percentage = node_metrics.uptime_percentage;
        let health_check_failures = node_metrics.health_check_failures;

        let mut metrics = self.metrics.write().await;
        metrics
            .node_metrics
            .insert(node_metrics.node_id.clone(), node_metrics);

        // Simplified metrics without dynamic labels for compilation stability
        metrics::gauge!("node_response_time_ms").set(response_time_ms);
        metrics::gauge!("node_uptime_percentage").set(uptime_percentage);
        metrics::counter!("node_health_check_failures").absolute(health_check_failures);

        Ok(())
    }

    /// Updates the aggregate performance metrics.
    ///
    /// Parameters:
    /// - `perf_metrics`: QPS, mean/percentile latencies, error rate, throughput, etc.
    ///
    /// Behavior:
    /// - Overwrites the internal cache.
    /// - Writes each field to its corresponding gauge.
    ///
    /// Recommendation:
    /// - Callers should keep the statistics window consistent (e.g. all values
    ///   computed over the same sliding 1-minute window).
    pub async fn update_performance_metrics(&self, perf_metrics: PerformanceMetrics) -> Result<()> {
        let mut metrics = self.metrics.write().await;

        // Mirror every field to its Prometheus gauge (all fields are plain f64).
        metrics::gauge!("requests_per_second").set(perf_metrics.requests_per_second);
        metrics::gauge!("average_latency_ms").set(perf_metrics.average_latency_ms);
        metrics::gauge!("p95_latency_ms").set(perf_metrics.p95_latency_ms);
        metrics::gauge!("p99_latency_ms").set(perf_metrics.p99_latency_ms);
        metrics::gauge!("error_rate").set(perf_metrics.error_rate);
        metrics::gauge!("throughput_bytes_per_second")
            .set(perf_metrics.throughput_bytes_per_second);

        // Move (not clone) the snapshot into the cache; the previous version
        // cloned the struct only to immediately re-read the same fields.
        metrics.performance_metrics = perf_metrics;

        Ok(())
    }

    /// Updates the aggregate health-status metrics.
    ///
    /// Parameters:
    /// - `health_metrics`: per-status node counts plus health-check totals/failures.
    ///
    /// Behavior:
    /// - Overwrites the internal cache.
    /// - Updates the per-status node gauges.
    /// - Sets the cumulative health-check counters via `absolute`.
    ///
    /// Note:
    /// - If deltas are ever needed, keep the previous snapshot and diff against it.
    pub async fn update_health_metrics(&self, health_metrics: HealthMetrics) -> Result<()> {
        let mut metrics = self.metrics.write().await;

        // Mirror to Prometheus (all fields are Copy, so read them directly).
        metrics::gauge!("healthy_nodes").set(health_metrics.healthy_nodes as f64);
        metrics::gauge!("unhealthy_nodes").set(health_metrics.unhealthy_nodes as f64);
        metrics::gauge!("maintenance_nodes").set(health_metrics.maintenance_nodes as f64);
        metrics::gauge!("unknown_nodes").set(health_metrics.unknown_nodes as f64);
        metrics::counter!("total_health_checks").absolute(health_metrics.total_health_checks);
        metrics::counter!("failed_health_checks").absolute(health_metrics.failed_health_checks);

        // Move (not clone) into the cache; the previous version cloned needlessly.
        metrics.health_metrics = health_metrics;

        Ok(())
    }

    /// Records statistics for one HTTP / API request.
    ///
    /// Parameters:
    /// - `_method`: HTTP method (not yet part of the label scheme; reserved).
    /// - `_endpoint`: request path / logical identifier (reserved).
    /// - `duration`: elapsed time of this request.
    /// - `_status`: response status code (reserved).
    ///
    /// Behavior:
    /// - Increments the total-requests counter by 1.
    /// - Records the duration in a histogram (unit: seconds).
    ///
    /// Extension suggestion:
    /// - Add method / status / endpoint as recorder labels to enable
    ///   multi-dimensional analysis.
    pub fn record_request(&self, _method: &str, _endpoint: &str, duration: Duration, _status: u16) {
        let elapsed_secs = duration.as_secs_f64();
        metrics::counter!("http_requests_total").increment(1);
        metrics::histogram!("http_request_duration_seconds").record(elapsed_secs);
    }

    /// Records statistics for one Raft operation (e.g. AppendEntries / Vote).
    ///
    /// Parameters:
    /// - `_operation`: operation-type identifier (labels unused; reserved).
    /// - `_success`: whether the operation succeeded (labels unused; reserved).
    /// - `duration`: elapsed time.
    ///
    /// Behavior:
    /// - Increments the cumulative operation counter by 1.
    /// - Records the duration in a histogram.
    ///
    /// Future improvement:
    /// - Use labels to distinguish operation type and success/failure, which helps
    ///   hotspot / failure analysis.
    pub fn record_raft_operation(&self, _operation: &str, _success: bool, duration: Duration) {
        let elapsed_secs = duration.as_secs_f64();
        metrics::counter!("raft_operations_total").increment(1);
        metrics::histogram!("raft_operation_duration_seconds").record(elapsed_secs);
    }

    /// Registers (describes) all custom Prometheus metrics.
    ///
    /// Notes:
    /// - The `metrics::describe_*` macros attach unit and help text for each
    ///   metric so a future exporter endpoint can be self-describing.
    /// - Idempotent in practice (implementations typically deduplicate), but it
    ///   should only be called once during initialization.
    /// - No dynamic labels yet; a custom recorder implementation can add them.
    fn register_metrics() {
        // Cluster metrics
        metrics::describe_gauge!("cluster_members", Unit::Count, "Number of cluster members");
        metrics::describe_gauge!("cluster_managed_nodes", Unit::Count, "Number of managed nodes");
        metrics::describe_gauge!("cluster_leader_id", Unit::Count, "Current cluster leader ID");
        // Fix: `collect_metrics` sets this gauge but it was never described here.
        metrics::describe_gauge!(
            "cluster_uptime_seconds",
            Unit::Seconds,
            "Cluster uptime in seconds"
        );

        // Raft metrics
        metrics::describe_gauge!("raft_current_term", Unit::Count, "Current Raft term");
        metrics::describe_gauge!("raft_commit_index", Unit::Count, "Raft commit index");
        metrics::describe_gauge!("raft_last_applied", Unit::Count, "Last applied log index");
        metrics::describe_gauge!("raft_log_entries", Unit::Count, "Number of log entries");
        metrics::describe_gauge!("raft_snapshot_index", Unit::Count, "Snapshot index");

        // Node metrics
        metrics::describe_gauge!("node_response_time_ms", Unit::Milliseconds, "Node response time");
        metrics::describe_gauge!("node_uptime_percentage", Unit::Percent, "Node uptime percentage");
        metrics::describe_counter!(
            "node_health_check_failures",
            Unit::Count,
            "Node health check failures"
        );

        // Performance metrics
        metrics::describe_gauge!("requests_per_second", Unit::Count, "Requests per second (approx)");
        metrics::describe_gauge!("average_latency_ms", Unit::Milliseconds, "Average latency");
        metrics::describe_gauge!("p95_latency_ms", Unit::Milliseconds, "95th percentile latency");
        metrics::describe_gauge!("p99_latency_ms", Unit::Milliseconds, "99th percentile latency");
        metrics::describe_gauge!("error_rate", Unit::Percent, "Error rate percentage");
        metrics::describe_gauge!(
            "throughput_bytes_per_second",
            Unit::Count,
            "Throughput in bytes per second"
        );

        // Health metrics
        metrics::describe_gauge!("healthy_nodes", Unit::Count, "Number of healthy nodes");
        metrics::describe_gauge!("unhealthy_nodes", Unit::Count, "Number of unhealthy nodes");
        metrics::describe_gauge!("maintenance_nodes", Unit::Count, "Number of nodes in maintenance");
        metrics::describe_gauge!("unknown_nodes", Unit::Count, "Number of nodes with unknown status");
        metrics::describe_counter!("total_health_checks", Unit::Count, "Total health checks performed");
        metrics::describe_counter!("failed_health_checks", Unit::Count, "Failed health checks");

        // HTTP metrics
        metrics::describe_counter!("http_requests_total", Unit::Count, "Total HTTP requests");
        metrics::describe_histogram!(
            "http_request_duration_seconds",
            Unit::Seconds,
            "HTTP request duration"
        );

        // Raft operation metrics
        metrics::describe_counter!("raft_operations_total", Unit::Count, "Total Raft operations");
        metrics::describe_histogram!(
            "raft_operation_duration_seconds",
            Unit::Seconds,
            "Raft operation duration"
        );
    }

    /// Starts the Prometheus metrics HTTP server (reserved interface).
    ///
    /// Current implementation: a no-op returning `Ok(())`, kept so an exporter
    /// (e.g. `metrics-exporter-prometheus`) can be wired in later without API changes.
    /// Suggested future steps:
    /// 1. Initialize the exporter and bind the listen address.
    /// 2. Store the handle on the struct so `shutdown` can stop it.
    /// 3. Support a configurable /metrics path.
    //
    // Not called anywhere in this file yet; the allow keeps the extension point
    // around without a dead-code warning.
    #[allow(dead_code)]
    async fn start_prometheus_server(&self) -> Result<()> {
        Ok(())
    }

    /// Periodically collects system/runtime metrics.
    ///
    /// Current implementation:
    /// - Only advances uptime (assumes a fixed 10-second invocation interval,
    ///   matching the interval configured in `start`).
    /// - Writes the Prometheus gauge: cluster_uptime_seconds.
    ///
    /// Extension directions:
    /// - Read OS metrics (CPU, memory, disk, network).
    /// - Collect internal queue lengths / backlog / lock-wait times.
    /// - Add error capture and collection-duration measurement.
    ///
    /// Parameters:
    /// - `metrics`: reference to the shared `ClusterMetrics`.
    ///
    /// Returns: `Result<()>` — essentially infallible in the current logic.
    async fn collect_metrics(metrics: &Arc<RwLock<ClusterMetrics>>) -> Result<()> {
        // Advance uptime and read the new value under a single write lock. The
        // previous write-then-reacquire-read pattern paid two lock acquisitions
        // and allowed another writer to slip in between, making the gauge racy.
        let uptime = {
            let mut guard = metrics.write().await;
            guard.cluster_info.uptime_seconds += 10; // Collection interval
            guard.cluster_info.uptime_seconds
        };

        // Mirror to the Prometheus uptime gauge.
        metrics::gauge!("cluster_uptime_seconds").set(uptime as f64);

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: constructs the collector with monitoring disabled, pushes one
    // cluster-info update, and reads it back through a snapshot.
    #[tokio::test]
    async fn test_metrics_collector() {
        // Prometheus and tracing are both disabled so the test neither registers
        // metric descriptions nor touches any external endpoint.
        let config = MonitoringConfig {
            prometheus: crate::config::PrometheusConfig {
                enabled: false,
                bind_addr: "127.0.0.1:9090".to_string(),
                path: "/metrics".to_string(),
            },
            tracing: crate::config::TracingConfig {
                enabled: false,
                endpoint: "http://localhost:14268/api/traces".to_string(),
                service_name: "test".to_string(),
                sample_rate: 0.1,
            },
        };

        let metrics_collector = MetricsCollector::new(&config).unwrap();

        // Test metrics updates
        metrics_collector
            .update_cluster_info("test-cluster", Some(1), 3, 10)
            .await
            .unwrap();

        // The snapshot must reflect exactly what was written above.
        let metrics = metrics_collector.get_metrics().await;
        assert_eq!(metrics.cluster_info.cluster_id, "test-cluster");
        assert_eq!(metrics.cluster_info.leader_id, Some(1));
        assert_eq!(metrics.cluster_info.member_count, 3);
        assert_eq!(metrics.cluster_info.managed_node_count, 10);
    }
}
