//! 集群协调器模块
//! 
//! 作为集群功能的核心协调器，负责：
//! - 集成各个集群子模块
//! - 处理客户端请求路由
//! - 协调集群状态变更
//! - 管理集群生命周期

use crate::cluster::traits::{
    ClusterCoordinator as ClusterCoordinatorTrait, ClusterInfo, HealthStatus,
    SlotManager as SlotManagerTrait
};
use crate::cluster::{
    ClusterConfig, ClusterNode, ClusterNetworkManager, 
    GossipProtocol, SlotManager, ClusterError, ClusterResult,
};
use crate::cluster::discovery::GossipConfig;
use crate::cluster::state::ClusterStateManager;
use async_trait::async_trait;
// use std::collections::HashMap; // 暂时未使用
use std::net::SocketAddr;
use std::sync::Arc;
// use std::time::Duration; // 暂时未使用
use tokio::sync::{broadcast, mpsc, RwLock};
// use tokio::time::{interval, sleep}; // 暂时未使用
use tracing::{error, info, warn};
// use uuid::Uuid; // 暂时未使用

/// 集群命令类型
#[derive(Debug, Clone)]
/// Cluster command types.
///
/// Commands accepted by the coordinator's background command channel; they
/// mirror the Redis `CLUSTER` subcommands (MEET, FORGET, REPLICATE, ...).
#[derive(Debug, Clone)]
pub enum ClusterCommand {
    /// CLUSTER MEET: initiate a handshake with the node at `ip:port`.
    Meet { ip: String, port: u16 },
    /// CLUSTER FORGET: remove a node from the local cluster view.
    Forget { node_id: String },
    /// CLUSTER REPLICATE: make this node a replica of `master_id`.
    Replicate { master_id: String },
    /// CLUSTER FAILOVER: promote this replica to master.
    Failover,
    /// CLUSTER RESET: reset local cluster state.
    Reset,
    /// CLUSTER INFO: report aggregate cluster information.
    Info,
    /// CLUSTER NODES: report the known-node table.
    Nodes,
    /// CLUSTER SLOTS: report the slot→node mapping.
    Slots,
    /// CLUSTER ADDSLOTS: claim the given hash slots for this node.
    AddSlots { slots: Vec<u16> },
    /// CLUSTER DELSLOTS: release the given hash slots.
    DelSlots { slots: Vec<u16> },
    /// CLUSTER SETSLOT: change one slot's state (see [`SlotAction`]).
    SetSlot { slot: u16, action: SlotAction, node_id: Option<String> },
    /// CLUSTER SAVECONFIG: persist the cluster configuration.
    SaveConfig,
}

/// 槽位操作类型
#[derive(Debug, Clone)]
pub enum SlotAction {
    Migrating,
    Importing,
    Stable,
    Node,
}

/// 集群事件类型
#[derive(Debug, Clone)]
/// Cluster event types, published through the coordinator's broadcast channel
/// (see `ClusterCoordinator::subscribe_events`).
#[derive(Debug, Clone)]
pub enum ClusterEvent {
    /// A new node joined the cluster.
    NodeJoined { node_id: String, address: SocketAddr },
    /// A node left the cluster.
    NodeLeft { node_id: String },
    /// A node was marked as failed.
    NodeFailed { node_id: String },
    /// A failover replaced the master of some shard.
    MasterChanged { old_master: String, new_master: String },
    /// A hash slot was assigned to a node.
    SlotAssigned { slot: u16, node_id: String },
    /// The overall cluster state string changed (e.g. "ok" → "fail").
    ClusterStateChanged { old_state: String, new_state: String },
}

/// 集群协调器 - 集群功能的核心组件
/// Cluster coordinator — the central component of the cluster feature.
///
/// Integrates the cluster submodules (state, slots, network, gossip), routes
/// client requests, applies cluster-state changes, and manages the cluster
/// lifecycle. Cloneable: shared state lives behind `Arc`s, so clones observe
/// the same state/slot managers and running flag (see the manual `Clone` impl).
pub struct ClusterCoordinator {
    /// Local node information (identity, address, role).
    pub local_node: ClusterNode,
    
    /// Cluster configuration (validated at construction time).
    config: ClusterConfig,
    
    /// Cluster state manager (shared; snapshot/update API).
    state_manager: Arc<ClusterStateManager>,
    
    /// Hash-slot manager (slot→node ownership).
    slot_manager: Arc<RwLock<SlotManager>>,
    
    /// Network manager (dependency-injected; `None` when running without networking).
    network_manager: Option<Arc<RwLock<ClusterNetworkManager>>>,
    
    /// Gossip protocol (dependency-injected; `None` when running without networking).
    gossip_protocol: Option<Arc<RwLock<GossipProtocol>>>,
    
    /// Running flag, shared with the background command-processor task.
    is_running: Arc<RwLock<bool>>,
    
    /// Broadcast sender for [`ClusterEvent`]s.
    event_broadcaster: broadcast::Sender<ClusterEvent>,
    
    /// Command channel: sender is cloneable and shared;
    /// the receiver is consumed once by the command processor
    /// (`None` after startup and in clones).
    command_sender: mpsc::UnboundedSender<ClusterCommand>,
    command_receiver: Option<mpsc::UnboundedReceiver<ClusterCommand>>,
}

impl ClusterCoordinator {
    /// 创建新的集群协调器（使用依赖注入）
    pub fn new_with_dependencies(
        config: ClusterConfig,
        network_manager: Option<Arc<RwLock<ClusterNetworkManager>>>,
        gossip_protocol: Option<Arc<RwLock<GossipProtocol>>>,
    ) -> ClusterResult<Self> {
        // 验证配置
        config.validate().map_err(ClusterError::Config)?;
        
        // 创建本地节点
        let local_addr = config.redis_socket_addr()
            .map_err(|e| ClusterError::Config(format!("无效的Redis地址: {e}")))?;
        let local_node = ClusterNode::new_master(config.node_id.clone(), local_addr);
        
        // 创建状态管理器
        let state_manager = Arc::new(ClusterStateManager::new(config.clone()));
        
        // 初始化槽位管理器
        let slot_manager = Arc::new(RwLock::new(SlotManager::new()));
        
        // 创建事件通道
        let (event_broadcaster, _) = broadcast::channel(1000);
        let (command_sender, command_receiver) = mpsc::unbounded_channel();
        
        Ok(Self {
            local_node,
            config,
            state_manager,
            slot_manager,
            network_manager,
            gossip_protocol,
            is_running: Arc::new(RwLock::new(false)),
            event_broadcaster,
            command_sender,
            command_receiver: Some(command_receiver),
        })
    }
    
    /// 创建新的集群协调器（传统方式，向后兼容）
    pub async fn new(config: ClusterConfig) -> ClusterResult<Self> {
        // 验证配置
        config.validate().map_err(ClusterError::Config)?;
        
        // 创建网络管理器
        let network_manager = {
            use crate::cluster::network::ClusterNetworkManager;
            let mut nm = ClusterNetworkManager::new(config.clone()).await?;
            nm.bind().await?;
            nm
        };
        let network_manager = Some(Arc::new(RwLock::new(network_manager)));
        
        // 创建状态管理器（用于Gossip协议）
        let state_manager = Arc::new(ClusterStateManager::new(config.clone()));
        
        // 创建 Gossip 协议
        let gossip_config = GossipConfig {
            gossip_interval: config.gossip_interval,
            fanout: config.gossip_fanout as usize,
            fail_timeout: config.fail_timeout,
            max_retries: 3,
            udp_buffer_size: 65536,
            enable_udp_broadcast: true,
        };
        
        let cluster_addr = config.cluster_socket_addr()
            .map_err(|e| ClusterError::Config(format!("无效的集群地址: {e}")))?;
        
        let gossip_protocol = GossipProtocol::new(
            config.node_id.clone(),
            cluster_addr,
            state_manager.state(),
            gossip_config,
        ).await?;
        let gossip_protocol = Some(Arc::new(RwLock::new(gossip_protocol)));
        
        Self::new_with_dependencies(config, network_manager, gossip_protocol)
    }
    
    /// 获取本地节点ID
    pub async fn get_local_node_id(&self) -> String {
        self.local_node.id.clone()
    }
    
    /// 获取集群节点信息
    pub async fn get_cluster_nodes(&self) -> ClusterResult<String> {
        let state = self.state_manager.get_snapshot().await;
        let mut nodes_info = Vec::new();
        
        // 添加本地节点信息
        nodes_info.push(self.local_node.to_info_string());
        
        // 添加其他节点信息
        for node in state.nodes.values() {
            if node.id != self.local_node.id {
                nodes_info.push(node.to_info_string());
            }
        }
        
        Ok(nodes_info.join("\n"))
    }
    
    /// 忘记节点
    pub async fn forget_node(&mut self, node_id: &str) -> ClusterResult<()> {
        self.state_manager.update_state(|state| {
            state.remove_node(node_id);
        }).await;
        info!("已忘记节点: {}", node_id);
        Ok(())
    }
    
    /// Meet节点
    pub async fn meet_node(&self, addr_str: &str) -> ClusterResult<()> {
        let addr: SocketAddr = addr_str.parse()
            .map_err(|e| ClusterError::Config(format!("无效的地址格式: {e}")))?;
        
        info!("尝试连接到节点: {}", addr);
        
        // TODO: 实现实际的meet逻辑
        // 1. 建立网络连接
        // 2. 发送meet消息
        // 3. 交换集群信息
        
        Ok(())
    }
    
    /// 获取集群信息
    pub async fn get_cluster_info(&self) -> ClusterResult<ClusterInfo> {
        let state = self.state_manager.get_snapshot().await;
        let slot_manager = self.slot_manager.read().await;
        
        let node_count = state.nodes.len() + 1; // +1 for local node
        let master_count = state.nodes.values()
            .filter(|node| node.is_master())
            .count() + 1; // +1 for local master
        let slave_count = node_count - master_count;
        
        // 计算已分配的槽位数量
        let mut assigned_slots = 0;
        for i in 0..16384 {
            if slot_manager.get_slot_node(i as u16).await.is_some() {
                assigned_slots += 1;
            }
        }
        
        Ok(ClusterInfo {
            cluster_id: state.cluster_id.clone(),
            node_count,
            master_count,
            slave_count,
            slots_assigned: assigned_slots,
            cluster_state: "ok".to_string(),
            cluster_size: master_count,
        })
    }
    
    /// 获取事件接收器
    pub fn subscribe_events(&self) -> broadcast::Receiver<ClusterEvent> {
        self.event_broadcaster.subscribe()
    }
    
    /// 发送集群命令
    pub async fn send_command(&self, command: ClusterCommand) -> ClusterResult<()> {
        self.command_sender.send(command)
            .map_err(|e| ClusterError::Config(format!("发送命令失败: {e}")))
    }
    
    /// 启动命令处理器
    async fn start_command_processor(&mut self) -> ClusterResult<()> {
        let mut receiver = self.command_receiver.take()
            .ok_or_else(|| ClusterError::Config("命令接收器已被使用".to_string()))?;
        
        let coordinator = Arc::new(RwLock::new(self.clone()));
        let is_running = self.is_running.clone();
        
        tokio::spawn(async move {
            while *is_running.read().await {
                match receiver.recv().await {
                    Some(command) => {
                        if let Err(e) = Self::handle_command(coordinator.clone(), command).await {
                            error!("处理集群命令失败: {}", e);
                        }
                    }
                    None => break,
                }
            }
        });
        
        Ok(())
    }
    
    /// 处理集群命令
    async fn handle_command(
        coordinator: Arc<RwLock<ClusterCoordinator>>,
        command: ClusterCommand,
    ) -> ClusterResult<()> {
        match command {
            ClusterCommand::Meet { ip, port } => {
                let addr_str = format!("{ip}:{port}");
                coordinator.read().await.meet_node(&addr_str).await?;
            }
            ClusterCommand::Forget { node_id } => {
                coordinator.write().await.forget_node(&node_id).await?;
            }
            _ => {
                warn!("未实现的集群命令: {:?}", command);
            }
        }
        Ok(())
    }
}

/// Manual `Clone`: `#[derive(Clone)]` is impossible because
/// `mpsc::UnboundedReceiver` is not `Clone`. Clones share every
/// `Arc`-backed field (state/slot managers, running flag, channels) with the
/// original, but never the command receiver — only the instance that still
/// holds it can start the command processor.
impl Clone for ClusterCoordinator {
    fn clone(&self) -> Self {
        Self {
            local_node: self.local_node.clone(),
            config: self.config.clone(),
            state_manager: self.state_manager.clone(),
            slot_manager: self.slot_manager.clone(),
            network_manager: self.network_manager.clone(),
            gossip_protocol: self.gossip_protocol.clone(),
            is_running: self.is_running.clone(),
            event_broadcaster: self.event_broadcaster.clone(),
            command_sender: self.command_sender.clone(),
            command_receiver: None, // the receiver is never duplicated
        }
    }
}

#[async_trait]
impl ClusterCoordinatorTrait for ClusterCoordinator {
    /// Start the coordinator and its command processor.
    ///
    /// Idempotent: a second call while already running is a no-op.
    async fn start(&mut self) -> ClusterResult<()> {
        {
            let mut running = self.is_running.write().await;
            if *running {
                return Ok(());
            }
            *running = true;
        }
        
        info!("启动集群协调器...");
        
        // If the processor cannot start (e.g. the command receiver was
        // already consumed by a previous start), roll the running flag back so
        // the coordinator is not left claiming to run with no processor.
        if let Err(e) = self.start_command_processor().await {
            *self.is_running.write().await = false;
            return Err(e);
        }
        
        info!("集群协调器启动成功");
        Ok(())
    }
    
    /// Stop the coordinator. Idempotent.
    ///
    /// Only clears the running flag; the command-processor task observes the
    /// flag the next time a command arrives on its channel.
    async fn stop(&mut self) -> ClusterResult<()> {
        let mut running = self.is_running.write().await;
        if !*running {
            return Ok(());
        }
        
        *running = false;
        info!("集群协调器已停止");
        Ok(())
    }
    
    /// Try to MEET every seed node; individual failures are logged and
    /// skipped rather than aborting the join.
    async fn join_cluster(&mut self, seeds: Vec<SocketAddr>) -> ClusterResult<()> {
        info!("加入集群，种子节点: {:?}", seeds);
        
        for addr in seeds {
            if let Err(e) = self.meet_node(&addr.to_string()).await {
                warn!("连接种子节点 {} 失败: {}", addr, e);
            } else {
                info!("成功连接到种子节点: {}", addr);
            }
        }
        
        Ok(())
    }
    
    /// Leave the cluster, then stop the coordinator.
    async fn leave_cluster(&mut self) -> ClusterResult<()> {
        info!("离开集群");
        
        // TODO: implement a graceful leave:
        // 1. notify the other nodes
        // 2. migrate owned slots away
        // 3. clean up local state
        self.stop().await
    }
    
    /// Map `key` to its hash slot and return the owning node's ID.
    ///
    /// Falls back to the local node when the slot is unassigned.
    async fn route_request(&self, key: &str) -> ClusterResult<String> {
        let slot_manager = self.slot_manager.read().await;
        let slot = slot_manager.calculate_slot(key);
        
        match slot_manager.get_slot_node(slot).await {
            Some(node_id) => Ok(node_id.clone()),
            None => Ok(self.local_node.id.clone()),
        }
    }
    
    /// Infallible cluster info for the trait interface; degrades to a
    /// single-node "fail" placeholder when gathering info errors out.
    async fn get_cluster_info(&self) -> ClusterInfo {
        // Resolves to the *inherent* `get_cluster_info` (inherent methods win
        // over trait methods in Rust method resolution), which returns a Result.
        self.get_cluster_info().await.unwrap_or_else(|_| ClusterInfo {
            cluster_id: "unknown".to_string(),
            node_count: 1,
            master_count: 1,
            slave_count: 0,
            slots_assigned: 0,
            cluster_state: "fail".to_string(),
            cluster_size: 1,
        })
    }
    
    /// Execute a textual `CLUSTER <subcommand>`; supports NODES, INFO, MYID.
    ///
    /// # Errors
    /// Returns `ClusterError::Config` for an empty or unknown command.
    async fn execute_cluster_command(&mut self, command: String) -> ClusterResult<String> {
        let parts: Vec<&str> = command.split_whitespace().collect();
        if parts.is_empty() {
            return Err(ClusterError::Config("命令不能为空".to_string()));
        }
        
        match parts[0].to_uppercase().as_str() {
            "NODES" => self.get_cluster_nodes().await,
            "INFO" => {
                let info = self.get_cluster_info().await.unwrap_or_default();
                Ok(format!(
                    "cluster_state:{}\ncluster_slots_assigned:{}\ncluster_known_nodes:{}",
                    info.cluster_state, info.slots_assigned, info.node_count
                ))
            }
            "MYID" => Ok(self.get_local_node_id().await),
            _ => Err(ClusterError::Config(format!("未知命令: {}", parts[0]))),
        }
    }
    
    /// Report cluster health; currently only flags unassigned slots.
    async fn health_check(&self) -> ClusterResult<HealthStatus> {
        let _state = self.state_manager.get_snapshot().await;
        let is_healthy = true; // TODO: real health probing of peer nodes
        
        let mut issues = Vec::new();
        let mut recommendations = Vec::new();
        
        if !is_healthy {
            issues.push("集群状态不健康".to_string());
            recommendations.push("检查节点连接状态".to_string());
        }
        
        // A complete cluster requires full coverage of all hash slots.
        let slot_manager = self.slot_manager.read().await;
        let unassigned_slots = slot_manager.get_unassigned_slots();
        if !unassigned_slots.is_empty() {
            issues.push(format!("有 {} 个槽位未分配", unassigned_slots.len()));
            recommendations.push("分配所有槽位以确保集群完整性".to_string());
        }
        
        Ok(HealthStatus {
            is_healthy: is_healthy && unassigned_slots.is_empty(),
            issues,
            recommendations,
        })
    }
}

impl Default for ClusterInfo {
    fn default() -> Self {
        Self {
            cluster_id: "unknown".to_string(),
            node_count: 0,
            master_count: 0,
            slave_count: 0,
            slots_assigned: 0,
            cluster_state: "fail".to_string(),
            cluster_size: 0,
        }
    }
}

