use crate::api::ApiServer;
use crate::events::EventSystem;
use crate::health::HealthChecker;
use crate::metrics::MetricsCollector;
use crate::network::NetworkManager;
use crate::raft::RaftManager;
use crate::storage::StorageManager;
use crate::{ClusterId, Config, Error, NodeId, Result};

use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{error, info, warn};

/// Cluster manager that coordinates all components
/// Cluster manager that coordinates all components
///
/// Owns (via `Arc`) every subsystem — storage, network, Raft, health
/// checking, events, metrics and the optional API server — and exposes the
/// lifecycle (`start`/`shutdown`) and membership operations on top of them.
pub struct ClusterManager {
    // Full configuration; consulted at runtime (e.g. api.enabled, node id/addr).
    config: Config,
    raft_manager: Arc<RaftManager>,
    // Held only to keep the subsystems alive; underscore-prefixed because the
    // manager never calls them directly after construction.
    _storage_manager: Arc<StorageManager>,
    _network_manager: Arc<NetworkManager>,
    // Wrapped in RwLock so we can start() later even with &self
    api_server: Arc<RwLock<Option<ApiServer>>>,
    health_checker: Arc<HealthChecker>,
    event_system: Arc<EventSystem>,
    metrics_collector: Arc<MetricsCollector>,
    // Local view of cluster membership/leadership; updated by lifecycle
    // methods and the background event-processing task.
    state: Arc<RwLock<ClusterState>>,
    started: Arc<RwLock<bool>>, // prevent double start
}

/// Point-in-time snapshot of this node's view of the cluster.
///
/// Cloned out for callers (see `get_cluster_state`); mutated by the
/// lifecycle methods and the event-processing task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClusterState {
    // Taken from `config.cluster.name` at construction time.
    pub cluster_id: ClusterId,
    // Whether this node currently believes it is the Raft leader.
    pub is_leader: bool,
    // Raft voting/learner members of the cluster itself.
    pub members: Vec<NodeInfo>,
    // Business nodes managed *by* the cluster (not Raft members).
    pub managed_nodes: Vec<ManagedNode>,
}

/// Identity and status of a single cluster (Raft) member.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeInfo {
    pub id: NodeId,
    // Network address of the member; populated from `config.node.bind_addr`
    // for the local node.
    pub address: String,
    pub role: NodeRole,
    pub status: NodeStatus,
}

/// Raft role of a cluster member.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum NodeRole {
    /// The elected leader handling writes/membership changes.
    Leader,
    /// A voting member replicating the leader's log.
    Follower,
    /// A non-voting member (e.g. catching up before promotion).
    Learner,
}

/// Health/operational status of a cluster member or managed node.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum NodeStatus {
    Healthy,
    Unhealthy,
    /// Deliberately taken out of service; not treated as a failure.
    Maintenance,
    /// Status not yet determined (e.g. no health check has run).
    Unknown,
}

/// A business node managed by the cluster (registered via
/// `add_managed_node`, health-checked, and replicated through Raft).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManagedNode {
    pub id: String,
    pub address: String,
    // Free-form key/value labels attached by the operator.
    pub tags: std::collections::HashMap<String, String>,
    pub status: NodeStatus,
    // NOTE(review): presumably a unix timestamp, but the unit (seconds vs
    // millis) is not established here — confirm against the producer.
    pub last_seen: u64,
    // Most recent results, one per configured health check.
    pub health_checks: Vec<HealthCheckResult>,
}

/// Outcome of a single named health check against a managed node.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealthCheckResult {
    // Name of the check that produced this result.
    pub name: String,
    pub status: HealthCheckStatus,
    // How long the check took to run.
    pub duration: std::time::Duration,
    // Optional human-readable detail (e.g. the failure reason).
    pub message: Option<String>,
}

/// Result classification for a single health check probe.
///
/// Derives `PartialEq`/`Eq` for consistency with the sibling enums
/// `NodeRole` and `NodeStatus`, so callers can compare statuses directly
/// instead of matching.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum HealthCheckStatus {
    /// Check succeeded.
    Pass,
    /// Check failed.
    Fail,
    /// Check succeeded but reported a degraded condition.
    Warn,
}

impl ClusterManager {
    /// Creates a new `ClusterManager`.
    ///
    /// Initializes every subsystem: storage, network, events, metrics, Raft,
    /// health checking and — if enabled in the configuration — the API
    /// server. The storage backend is selected from
    /// `config.storage.storage_type` (`"memory"` or `"duckdb"`); any other
    /// value is rejected.
    ///
    /// # Errors
    ///
    /// Returns an error if the storage configuration is missing/unsupported
    /// or if any subsystem fails to initialize.
    pub async fn new(config: Config) -> Result<Self> {
        info!("Initializing Cluster Manager");

        // Initialize storage
        let storage_backend = match config.storage.storage_type.as_str() {
            "duckdb" => {
                let duckdb_config = config
                    .storage
                    .duckdb
                    .as_ref()
                    .ok_or_else(|| Error::cluster("DuckDB configuration missing"))?;
                StorageManager::new_duckdb(&duckdb_config.path).await?
            }
            "memory" => StorageManager::new_memory().await?,
            other => {
                return Err(Error::cluster(format!(
                    "Unsupported storage type: {}",
                    other
                )));
            }
        };
        let storage_manager = Arc::new(storage_backend);

        // Initialize network
        let network_manager = Arc::new(NetworkManager::new(&config.network).await?);

        // Initialize event system
        let event_system = Arc::new(EventSystem::new());

        // Initialize metrics collector
        let metrics_collector = Arc::new(MetricsCollector::new(&config.monitoring)?);

        // Initialize Raft manager
        let raft_manager = Arc::new(
            RaftManager::new(
                config.node.id,
                &config.raft,
                storage_manager.clone(),
                network_manager.clone(),
                event_system.clone(),
            )
            .await?,
        );

        // Initialize health checker
        let health_checker =
            Arc::new(HealthChecker::new(&config.health_check, event_system.clone()).await?);

        // Initialize API server if enabled
        let api_server = if config.api.enabled {
            Some(
                ApiServer::new(
                    &config.api,
                    raft_manager.clone(),
                    health_checker.clone(),
                    event_system.clone(),
                    metrics_collector.clone(),
                )
                .await?,
            )
        } else {
            None
        };

        let state = Arc::new(RwLock::new(ClusterState {
            cluster_id: config.cluster.name.clone(),
            is_leader: false,
            members: Vec::new(),
            managed_nodes: Vec::new(),
        }));

        Ok(Self {
            config,
            raft_manager,
            _storage_manager: storage_manager,
            _network_manager: network_manager,
            api_server: Arc::new(RwLock::new(api_server)),
            health_checker,
            event_system,
            metrics_collector,
            state,
            started: Arc::new(RwLock::new(false)),
        })
    }

    /// Initializes a brand-new cluster (bootstrap node only).
    ///
    /// Behavior:
    /// - runs Raft cluster initialization
    /// - marks the local state as leader and registers this node as a member
    /// - emits a `cluster_initialized` event
    pub async fn initialize_cluster(&self) -> Result<()> {
        info!("Initializing new cluster");

        // Initialize Raft cluster
        self.raft_manager.initialize_cluster().await?;

        // Update state
        {
            let mut state = self.state.write().await;
            state.is_leader = true;
            state.members.push(NodeInfo {
                id: self.config.node.id,
                address: self.config.node.bind_addr.clone(),
                role: NodeRole::Leader,
                status: NodeStatus::Healthy,
            });
        }

        // Emit cluster initialized event
        self.event_system
            .emit_cluster_initialized(&self.config.cluster.name)
            .await?;

        info!("Cluster initialized successfully");
        Ok(())
    }

    /// Joins an existing cluster (non-bootstrap node).
    ///
    /// Behavior:
    /// - contacts the configured peers through the Raft manager
    /// - emits a `cluster_joined` event
    pub async fn join_cluster(&self) -> Result<()> {
        info!("Joining existing cluster");

        // Join Raft cluster
        self.raft_manager
            .join_cluster(&self.config.cluster.peers)
            .await?;

        // Emit cluster joined event
        self.event_system
            .emit_cluster_joined(&self.config.cluster.name)
            .await?;

        info!("Joined cluster successfully");
        Ok(())
    }

    /// Starts all runtime components: Raft, health checking, metrics
    /// collection, the event-processing loop and (if enabled) the API server.
    ///
    /// Idempotent: calling `start` while already started is a no-op. The
    /// `started` guard is held for the entire startup sequence so concurrent
    /// callers serialize, and the flag is committed only after every
    /// component has started — a failed start can therefore be retried.
    pub async fn start(&self) -> Result<()> {
        // Hold the write guard across the whole startup so that (a) two
        // concurrent start() calls cannot interleave component startup and
        // (b) an error below leaves the flag false, allowing a retry.
        // (Previously the flag was set *before* starting components, so a
        // partial failure latched `started` and every retry silently
        // returned Ok without starting anything.)
        let mut started = self.started.write().await;
        if *started {
            info!("Cluster Manager already started");
            return Ok(());
        }

        info!("Starting Cluster Manager");

        self.raft_manager.start().await?;
        self.health_checker.start().await?;
        self.metrics_collector.start().await?;

        // Start API server (needs mutable access, hence the RwLock wrapper)
        if self.config.api.enabled {
            if let Some(server) = &mut *self.api_server.write().await {
                server.start().await?;
            }
        }

        self.start_event_processing().await?;

        // Commit only after everything above succeeded.
        *started = true;
        info!("Cluster Manager started successfully");
        Ok(())
    }

    /// Shuts down the cluster manager and its components in reverse start
    /// order, then clears the `started` flag so a clean shutdown can be
    /// followed by another `start()`.
    pub async fn shutdown(&self) -> Result<()> {
        info!("Shutting down Cluster Manager");

        // Stop API server
        if let Some(api_server) = &*self.api_server.write().await {
            api_server.shutdown().await?;
        }

        // Stop health checker
        self.health_checker.shutdown().await?;

        // Stop metrics collector
        self.metrics_collector.shutdown().await?;

        // Stop Raft manager
        self.raft_manager.shutdown().await?;

        // Allow a subsequent start() after a clean shutdown.
        *self.started.write().await = false;

        info!("Cluster Manager shut down successfully");
        Ok(())
    }

    /// Adds a managed business node (leader only).
    ///
    /// Steps:
    /// 1. verify this node is the Raft leader
    /// 2. persist the node through the Raft state machine (log replication)
    /// 3. register it with the health checker
    /// 4. update the local in-memory state
    /// 5. emit a `node_added` event
    pub async fn add_managed_node(&self, node: ManagedNode) -> Result<()> {
        info!("Adding managed node: {}", node.id);

        // Check if we're the leader
        if !self.is_leader().await {
            return Err(Error::cluster("Only leader can add managed nodes"));
        }

        // Store node in Raft state machine
        self.raft_manager.add_managed_node(node.clone()).await?;

        // Add to health checking
        self.health_checker
            .add_node(&node.id, &node.address)
            .await?;

        // Update local state
        {
            let mut state = self.state.write().await;
            state.managed_nodes.push(node.clone());
        }

        // Emit node added event
        self.event_system.emit_node_added(&node.id).await?;

        info!("Managed node added successfully: {}", node.id);
        Ok(())
    }

    /// Removes a managed node (leader only).
    ///
    /// The inverse of `add_managed_node`: removes the node from the Raft
    /// state machine, the health checker and the local cache, then emits a
    /// `node_removed` event.
    pub async fn remove_managed_node(&self, node_id: &str) -> Result<()> {
        info!("Removing managed node: {}", node_id);

        // Check if we're the leader
        if !self.is_leader().await {
            return Err(Error::cluster("Only leader can remove managed nodes"));
        }

        // Remove from Raft state machine
        self.raft_manager.remove_managed_node(node_id).await?;

        // Remove from health checking
        self.health_checker.remove_node(node_id).await?;

        // Update local state
        {
            let mut state = self.state.write().await;
            state.managed_nodes.retain(|n| n.id != node_id);
        }

        // Emit node removed event
        self.event_system.emit_node_removed(node_id).await?;

        info!("Managed node removed successfully: {}", node_id);
        Ok(())
    }

    /// Returns a snapshot of the current cluster state (read lock + clone).
    pub async fn get_cluster_state(&self) -> ClusterState {
        self.state.read().await.clone()
    }

    /// Returns whether this node is currently the Raft leader
    /// (delegates to the Raft manager).
    pub async fn is_leader(&self) -> bool {
        self.raft_manager.is_leader().await
    }

    /// Returns the underlying Raft metrics as raw JSON.
    pub async fn get_metrics(&self) -> Result<serde_json::Value> {
        self.raft_manager.get_metrics().await
    }

    /// Spawns the background task that subscribes to the internal
    /// `EventSystem` and reacts to events.
    ///
    /// Currently handled event types:
    /// - `raft_leadership_change`: updates the local `is_leader` flag
    /// - `node_health_change`: when a node turns unhealthy, kicks off the
    ///   failure-handling flow in a *separate* task
    async fn start_event_processing(&self) -> Result<()> {
        let event_system = self.event_system.clone();
        let raft_manager = self.raft_manager.clone();
        let health_checker = self.health_checker.clone();
        let state = self.state.clone();

        tokio::spawn(async move {
            let mut receiver = event_system.subscribe().await;

            while let Ok(event) = receiver.recv().await {
                match event.event_type.as_str() {
                    "raft_leadership_change" => {
                        if let Some(is_leader) =
                            event.data.get("is_leader").and_then(|v| v.as_bool())
                        {
                            let mut state_guard = state.write().await;
                            state_guard.is_leader = is_leader;

                            if is_leader {
                                info!("Became cluster leader");
                            } else {
                                info!("Lost cluster leadership");
                            }
                        }
                    }
                    "node_health_change" => {
                        if let (Some(node_id), Some(status)) = (
                            event.data.get("node_id").and_then(|v| v.as_str()),
                            event.data.get("status").and_then(|v| v.as_str()),
                        ) {
                            info!("Node {} health changed to {}", node_id, status);

                            // Handle node failure in its own task:
                            // handle_node_failure sleeps for 30s, and awaiting
                            // it inline would stall this loop (and lag the
                            // event receiver) for every unhealthy node.
                            if status == "unhealthy" {
                                let node_id = node_id.to_string();
                                let raft_manager = raft_manager.clone();
                                let health_checker = health_checker.clone();
                                tokio::spawn(async move {
                                    if let Err(e) = Self::handle_node_failure(
                                        &node_id,
                                        &raft_manager,
                                        &health_checker,
                                    )
                                    .await
                                    {
                                        error!("Failed to handle node failure: {}", e);
                                    }
                                });
                            }
                        }
                    }
                    _ => {
                        // Handle other events
                    }
                }
            }
        });

        Ok(())
    }

    /// Handles a reported node failure (triggered by health-change events).
    ///
    /// Flow:
    /// 1. trigger an immediate health re-check
    /// 2. wait a 30s grace period to ride out transient network blips
    /// 3. if the node is still unhealthy, prepare failover (TODO)
    async fn handle_node_failure(
        node_id: &str,
        _raft_manager: &Arc<RaftManager>,
        health_checker: &Arc<HealthChecker>,
    ) -> Result<()> {
        warn!("Handling node failure: {}", node_id);

        // Trigger additional health checks
        health_checker.trigger_check(node_id).await?;

        // If still unhealthy after additional checks, trigger failover
        tokio::time::sleep(std::time::Duration::from_secs(30)).await;

        if !health_checker.is_node_healthy(node_id).await? {
            info!("Triggering failover for node: {}", node_id);
            // Implement failover logic here
        }

        Ok(())
    }
}
