//! 集群状态管理模块
//! 
//! 管理集群的全局状态，包括节点信息、槽位分配、配置纪元等

use crate::cluster::{ClusterNode, ClusterConfig, ClusterError, ClusterResult};

use crate::cluster::traits::{
    StateManager as StateManagerTrait, ClusterStateInfo
};
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use uuid::Uuid;

/// Global cluster state: membership, slot ownership, epochs, and
/// in-flight slot migrations.
///
/// NOTE(review): unlike `MigrationInfo`/`ClusterStateFlag`, this struct does
/// not derive `Serialize`/`Deserialize` — presumably because `ClusterConfig`
/// or `ClusterNode` is not serializable; confirm before persisting state.
#[derive(Debug, Clone)]
pub struct ClusterState {
    /// Unique cluster identifier (a UUID v4 string by default).
    pub cluster_id: String,
    
    /// Current configuration epoch; bumped on every mutation via `touch()`
    /// and used to resolve conflicts (split-brain, elections).
    pub current_epoch: u64,
    
    /// Creation time, seconds since the Unix epoch.
    pub created_at: u64,
    
    /// Last modification time, seconds since the Unix epoch.
    pub last_updated: u64,
    
    /// Overall cluster health/lifecycle flag.
    pub state: ClusterStateFlag,
    
    /// Known nodes, keyed by node id.
    pub nodes: HashMap<String, ClusterNode>,
    
    /// Slot -> owning node id (valid slots are 0..16384).
    pub slot_assignments: HashMap<u16, String>,
    
    /// Cluster configuration.
    pub config: ClusterConfig,
    
    /// Ids of nodes currently marked as failed.
    pub failed_nodes: Vec<String>,
    
    /// In-flight slot migrations, keyed by slot.
    pub migrations: HashMap<u16, MigrationInfo>,
    
    /// Free-form key/value metadata.
    pub metadata: HashMap<String, String>,
}

/// Coarse cluster lifecycle/health flag, recomputed by
/// `ClusterState::check_health`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ClusterStateFlag {
    /// Healthy: all 16384 slots assigned and no failed nodes.
    Ok,
    /// Failing: half or more of the known nodes are failed.
    Fail,
    /// Partially failed: some nodes failed, but fewer than half.
    Pfail,
    /// Bootstrapping: no nodes yet, or not all slots assigned.
    Initializing,
    /// Reconfiguration in progress.
    /// NOTE(review): never set anywhere in this module — confirm a writer exists.
    Reconfiguring,
    /// Maintenance mode.
    /// NOTE(review): never set anywhere in this module — confirm a writer exists.
    Maintenance,
}

/// Bookkeeping for one in-flight slot migration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MigrationInfo {
    /// Id of the node the slot is moving from.
    pub source: String,
    /// Id of the node the slot is moving to.
    pub target: String,
    /// Start time, seconds since the Unix epoch.
    pub started_at: u64,
    /// Current migration phase.
    pub status: MigrationStatus,
    /// Completion fraction in [0.0, 1.0].
    pub progress: f64,
}

/// Phases of a slot migration (initial phase is `Preparing`; see
/// `ClusterState::start_migration`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum MigrationStatus {
    /// Created but data transfer has not started yet.
    Preparing,
    /// Data transfer under way.
    InProgress,
    /// Finished successfully.
    Completed,
    /// Aborted due to an error or expiry.
    Failed,
    /// Cancelled by request.
    Cancelled,
}

impl Default for ClusterState {
    /// Builds an empty, `Initializing` cluster state with a fresh random
    /// cluster id, epoch 0, and both timestamps set to "now".
    fn default() -> Self {
        // Seconds since the Unix epoch. A system clock before UNIX_EPOCH is
        // a broken-environment invariant, so `expect` with a message beats a
        // bare `unwrap` for diagnosability.
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is before UNIX_EPOCH")
            .as_secs();
            
        Self {
            cluster_id: Uuid::new_v4().to_string(),
            current_epoch: 0,
            created_at: now,
            last_updated: now,
            state: ClusterStateFlag::Initializing,
            nodes: HashMap::new(),
            slot_assignments: HashMap::new(),
            config: ClusterConfig::default(),
            failed_nodes: Vec::new(),
            migrations: HashMap::new(),
            metadata: HashMap::new(),
        }
    }
}

impl ClusterState {
    /// 创建新的集群状态
    pub fn new(config: ClusterConfig) -> Self {
        let mut state = Self::default();
        state.config = config;
        state
    }
    
    /// Records a mutation: refreshes `last_updated` (Unix seconds) and
    /// bumps the configuration epoch.
    pub fn touch(&mut self) {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        self.last_updated = now;
        self.current_epoch += 1;
    }
    
    /// Registers a node (replacing any existing entry with the same id)
    /// and records the mutation.
    pub fn add_node(&mut self, node: ClusterNode) {
        let id = node.id.clone();
        self.nodes.insert(id, node);
        self.touch();
    }
    
    /// Removes a node and every reference to it (failure list, slot
    /// ownership). Returns the removed node, or `None` for unknown ids
    /// (in which case nothing is mutated).
    pub fn remove_node(&mut self, node_id: &str) -> Option<ClusterNode> {
        let node = self.nodes.remove(node_id)?;
        // Drop it from the failure list and release its slots.
        self.failed_nodes.retain(|id| id != node_id);
        self.slot_assignments.retain(|_, owner| owner != node_id);
        self.touch();
        Some(node)
    }
    
    /// Looks up a node by id.
    pub fn get_node(&self, node_id: &str) -> Option<&ClusterNode> {
        self.nodes.get(node_id)
    }
    
    /// Looks up a node by id, mutably. Callers mutating through this
    /// handle must call `touch()` themselves if the change should bump
    /// the epoch.
    pub fn get_node_mut(&mut self, node_id: &str) -> Option<&mut ClusterNode> {
        self.nodes.get_mut(node_id)
    }
    
    /// Marks a node as failed: flips its state to `Fail` and records its id
    /// in `failed_nodes` (at most once). No-op for unknown node ids.
    pub fn mark_node_failed(&mut self, node_id: &str) {
        if let Some(node) = self.nodes.get_mut(node_id) {
            node.set_state(crate::cluster::NodeState::Fail);
            // `iter().any` compares against the `&str` directly; the
            // original `contains(&node_id.to_string())` allocated a
            // temporary String on every call (clippy: cmp_owned).
            if !self.failed_nodes.iter().any(|id| id == node_id) {
                self.failed_nodes.push(node_id.to_string());
            }
            self.touch();
        }
    }
    
    /// Marks a node as healthy again: sets it `Online` and drops it from
    /// the failure list. No-op for unknown node ids.
    pub fn mark_node_recovered(&mut self, node_id: &str) {
        let node = match self.nodes.get_mut(node_id) {
            Some(node) => node,
            None => return,
        };
        node.set_state(crate::cluster::NodeState::Online);
        self.failed_nodes.retain(|id| id != node_id);
        self.touch();
    }
    
    /// Assigns a slot to a node, keeping the global slot map and the
    /// per-node slot sets consistent.
    ///
    /// # Errors
    /// Returns an error for slots >= 16384 or unknown node ids.
    pub fn assign_slot(&mut self, slot: u16, node_id: &str) -> Result<(), String> {
        if slot >= 16384 {
            return Err(format!("无效的槽位: {slot}"));
        }
        
        if !self.nodes.contains_key(node_id) {
            return Err(format!("节点不存在: {node_id}"));
        }
        
        // Fix: if the slot was previously owned by a *different* node, strip
        // it from that node's slot set too. The original left a stale entry
        // behind, so the old owner's view diverged from `slot_assignments`.
        if let Some(prev_owner) = self.slot_assignments.insert(slot, node_id.to_string()) {
            if prev_owner != node_id {
                if let Some(prev_node) = self.nodes.get_mut(&prev_owner) {
                    prev_node.remove_slot(slot);
                }
            }
        }
        
        // Record the slot on the new owner.
        if let Some(node) = self.nodes.get_mut(node_id) {
            node.add_slot(slot);
        }
        
        self.touch();
        Ok(())
    }
    
    /// Releases a slot from its owner and returns the previous owner's id,
    /// or `None` if the slot was unassigned (in which case nothing is
    /// mutated).
    pub fn unassign_slot(&mut self, slot: u16) -> Option<String> {
        let owner = self.slot_assignments.remove(&slot)?;
        // Keep the per-node slot set in sync.
        if let Some(node) = self.nodes.get_mut(&owner) {
            node.remove_slot(slot);
        }
        self.touch();
        Some(owner)
    }
    
    /// Returns the id of the node that owns `slot`, if assigned.
    pub fn get_slot_node(&self, slot: u16) -> Option<&String> {
        self.slot_assignments.get(&slot)
    }
    
    /// Begins migrating a slot from `source` to `target`, recording a
    /// `Preparing` migration entry.
    ///
    /// # Errors
    /// Fails when either node is unknown or the slot is already migrating.
    pub fn start_migration(&mut self, slot: u16, source: &str, target: &str) -> Result<(), String> {
        if !self.nodes.contains_key(source) {
            return Err(format!("源节点不存在: {source}"));
        }
        if !self.nodes.contains_key(target) {
            return Err(format!("目标节点不存在: {target}"));
        }
        if self.migrations.contains_key(&slot) {
            return Err(format!("槽位 {slot} 已在迁移中"));
        }
        
        let started_at = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        self.migrations.insert(
            slot,
            MigrationInfo {
                source: source.to_string(),
                target: target.to_string(),
                started_at,
                status: MigrationStatus::Preparing,
                progress: 0.0,
            },
        );
        self.touch();
        Ok(())
    }
    
    /// Finishes a migration: drops the migration record and hands the slot
    /// to the target node.
    ///
    /// # Errors
    /// Fails when the slot is not migrating, or when the target node no
    /// longer exists (in which case the migration record is still removed,
    /// matching the original behavior).
    pub fn complete_migration(&mut self, slot: u16) -> Result<(), String> {
        // Fix: `remove` directly instead of `get` + clone + `remove` (one
        // lookup, no clone), and no trailing `touch()` — `assign_slot`
        // already bumps the epoch, so the original incremented it twice per
        // completed migration.
        match self.migrations.remove(&slot) {
            Some(migration) => self.assign_slot(slot, &migration.target),
            None => Err(format!("槽位 {slot} 不在迁移中")),
        }
    }
    
    /// 取消槽位迁移
    pub fn cancel_migration(&mut self, slot: u16) -> Result<(), String> {
        if let Some(mut migration) = self.migrations.remove(&slot) {
            migration.status = MigrationStatus::Cancelled;
            self.touch();
            Ok(())
        } else {
            Err(format!("槽位 {slot} 不在迁移中"))
        }
    }
    
    /// Recomputes the cluster state flag from node and slot counts:
    /// no nodes or unassigned slots => `Initializing`; no failures => `Ok`;
    /// failures below half the nodes => `Pfail`; otherwise => `Fail`.
    pub fn check_health(&mut self) {
        let total_nodes = self.nodes.len();
        let failed_nodes = self.failed_nodes.len();
        let assigned_slots = self.slot_assignments.len();
        
        self.state = if total_nodes == 0 || assigned_slots < 16384 {
            ClusterStateFlag::Initializing
        } else if failed_nodes == 0 {
            ClusterStateFlag::Ok
        } else if failed_nodes < total_nodes / 2 {
            ClusterStateFlag::Pfail
        } else {
            ClusterStateFlag::Fail
        };
    }
    
    /// All nodes currently acting as masters.
    pub fn master_nodes(&self) -> Vec<&ClusterNode> {
        self.nodes.values().filter(|n| n.is_master()).collect()
    }
    
    /// All nodes currently acting as replicas.
    pub fn slave_nodes(&self) -> Vec<&ClusterNode> {
        self.nodes.values().filter(|n| n.is_slave()).collect()
    }
    
    /// All nodes currently online.
    pub fn online_nodes(&self) -> Vec<&ClusterNode> {
        self.nodes.values().filter(|n| n.is_online()).collect()
    }
    
    /// The entries of `failed_nodes` resolved to node references; ids with
    /// no matching node entry are silently skipped.
    pub fn failed_nodes(&self) -> Vec<&ClusterNode> {
        self.failed_nodes
            .iter()
            .filter_map(|id| self.nodes.get(id))
            .collect()
    }
    
    /// Aggregated node counters.
    pub fn node_stats(&self) -> NodeStats {
        // Count in place: the original called `master_nodes()` etc., each of
        // which allocates a Vec of references just to take its length.
        let masters = self.nodes.values().filter(|n| n.is_master()).count();
        let slaves = self.nodes.values().filter(|n| n.is_slave()).count();
        let online = self.nodes.values().filter(|n| n.is_online()).count();
        
        NodeStats {
            total: self.nodes.len(),
            masters,
            slaves,
            online,
            failed: self.failed_nodes.len(),
        }
    }
    
    /// Aggregated slot counters (out of the fixed 16384 hash slots).
    pub fn slot_stats(&self) -> SlotStats {
        let assigned = self.slot_assignments.len();
        SlotStats {
            total: 16384,
            assigned,
            unassigned: 16384 - assigned,
            migrating: self.migrations.len(),
        }
    }
    
    /// Renders cluster info in the Redis `CLUSTER INFO` key:value format.
    ///
    /// NOTE(review): `cluster_slots_ok` reuses the assigned count and the
    /// pfail/fail slot counters are hard-coded to 0; also `{:?}` prints the
    /// state as the Rust variant name (e.g. `Ok`), not the lowercase form
    /// Redis emits — confirm consumers accept this.
    pub fn to_info_string(&self) -> String {
        let node_stats = self.node_stats();
        let slot_stats = self.slot_stats();
        
        format!(
            "cluster_state:{:?}\n\
             cluster_slots_assigned:{}\n\
             cluster_slots_ok:{}\n\
             cluster_slots_pfail:0\n\
             cluster_slots_fail:0\n\
             cluster_known_nodes:{}\n\
             cluster_size:{}\n\
             cluster_current_epoch:{}\n\
             cluster_my_epoch:{}\n\
             cluster_stats_messages_sent:0\n\
             cluster_stats_messages_received:0",
            self.state,
            slot_stats.assigned,
            slot_stats.assigned,
            node_stats.total,
            node_stats.masters,
            self.current_epoch,
            self.current_epoch
        )
    }
    
    /// Stores a metadata entry (anything convertible into `String`) and
    /// records the mutation.
    pub fn set_metadata<K: Into<String>, V: Into<String>>(&mut self, key: K, value: V) {
        let (key, value) = (key.into(), value.into());
        self.metadata.insert(key, value);
        self.touch();
    }
    
    /// Reads a metadata entry by key.
    pub fn get_metadata(&self, key: &str) -> Option<&String> {
        self.metadata.get(key)
    }
    
    /// 清理过期的迁移记录
    pub fn cleanup_expired_migrations(&mut self, timeout: Duration) {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
            
        let expired_slots: Vec<u16> = self.migrations
            .iter()
            .filter(|(_, migration)| {
                now - migration.started_at > timeout.as_secs()
            })
            .map(|(&slot, _)| slot)
            .collect();
            
        for slot in expired_slots {
            if let Some(mut migration) = self.migrations.remove(&slot) {
                migration.status = MigrationStatus::Failed;
            }
        }
        
        if !self.migrations.is_empty() {
            self.touch();
        }
    }
}

/// Aggregated node counters, produced by `ClusterState::node_stats`.
#[derive(Debug, Clone)]
pub struct NodeStats {
    // Total number of known nodes.
    pub total: usize,
    // Nodes acting as masters.
    pub masters: usize,
    // Nodes acting as replicas.
    pub slaves: usize,
    // Nodes currently online.
    pub online: usize,
    // Nodes currently marked failed.
    pub failed: usize,
}

/// Aggregated slot counters, produced by `ClusterState::slot_stats`.
#[derive(Debug, Clone)]
pub struct SlotStats {
    // Always 16384 (the fixed hash-slot count).
    pub total: usize,
    // Slots with an owner in `slot_assignments`.
    pub assigned: usize,
    // `total - assigned`.
    pub unassigned: usize,
    // Slots with an in-flight migration record.
    pub migrating: usize,
}

/// Thread-safe owner of the shared `ClusterState`, plus a background
/// health-check loop started via `start()`.
pub struct ClusterStateManager {
    /// Shared, lock-protected cluster state.
    state: Arc<RwLock<ClusterState>>,
    
    /// How often the background task re-evaluates cluster health.
    health_check_interval: Duration,
    
    /// Flag the background task polls each tick to know when to exit.
    is_running: Arc<RwLock<bool>>,
}

impl ClusterStateManager {
    /// Creates a manager wrapping a fresh `ClusterState` built from
    /// `config`, with a 5-second health-check interval and the background
    /// loop not yet running.
    pub fn new(config: ClusterConfig) -> Self {
        Self {
            state: Arc::new(RwLock::new(ClusterState::new(config))),
            health_check_interval: Duration::from_secs(5),
            is_running: Arc::new(RwLock::new(false)),
        }
    }
    
    /// Hands out a shared handle to the lock-protected cluster state
    /// (cheap refcount bump, no data copy).
    pub fn state(&self) -> Arc<RwLock<ClusterState>> {
        Arc::clone(&self.state)
    }
    
    /// Returns the local node's id.
    ///
    /// NOTE(review): placeholder — always returns the literal
    /// "local-node". This should come from `ClusterConfig` or the locally
    /// registered node's info; TODO: wire up real node identity.
    pub fn get_node_id(&self) -> String {
        // Simplified implementation: in a real deployment this would be
        // read from configuration or local node registration.
        "local-node".to_string()
    }
    
    /// Starts the background health-check loop. Idempotent: a second call
    /// while already running is a no-op.
    ///
    /// NOTE(review): the spawned task only re-checks the running flag once
    /// per tick, so after `stop()` it may wait up to one full interval (and
    /// run one more health pass) before exiting.
    pub async fn start(&self) {
        let mut running = self.is_running.write().await;
        if *running {
            return;
        }
        *running = true;
        
        // Clone the shared handles for the detached background task.
        let state = self.state.clone();
        let interval = self.health_check_interval;
        let is_running = self.is_running.clone();
        
        tokio::spawn(async move {
            let mut ticker = tokio::time::interval(interval);
            
            while *is_running.read().await {
                ticker.tick().await;
                
                // The write lock is held only across synchronous work —
                // no `.await` happens while holding it.
                let mut state = state.write().await;
                state.check_health();
                state.cleanup_expired_migrations(Duration::from_secs(300)); // 5-minute migration timeout
            }
        });
    }
    
    /// Signals the background health-check task to exit after its current
    /// tick (see `start` for the shutdown latency caveat).
    pub async fn stop(&self) {
        *self.is_running.write().await = false;
    }
    
    /// Returns a point-in-time clone of the full cluster state.
    pub async fn get_snapshot(&self) -> ClusterState {
        let guard = self.state.read().await;
        guard.clone()
    }
    
    /// Applies `updater` to the state while holding the write lock.
    pub async fn update_state<F>(&self, updater: F)
    where
        F: FnOnce(&mut ClusterState),
    {
        let mut guard = self.state.write().await;
        updater(&mut guard);
    }
}

// =============================================================================
// Trait实现
// =============================================================================

impl ClusterStateInfo for ClusterState {
    fn cluster_id(&self) -> &str {
        &self.cluster_id
    }
    
    fn current_epoch(&self) -> u64 {
        self.current_epoch
    }
    
    fn config_epoch(&self) -> u64 {
        // The current epoch doubles as the config epoch in this module.
        self.current_epoch
    }
    
    fn set_current_epoch(&mut self, epoch: u64) {
        self.current_epoch = epoch;
        self.last_updated = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
    }
    
    fn increment_config_epoch(&mut self) -> u64 {
        self.current_epoch += 1;
        self.last_updated = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
        self.current_epoch
    }
    
    fn size(&self) -> usize {
        self.nodes.len()
    }
    
    fn is_healthy(&self) -> bool {
        match self.state {
            ClusterStateFlag::Ok => true,
            ClusterStateFlag::Pfail => {
                // Partial failure still counts as healthy while a strict
                // majority of nodes is online.
                let online_nodes = self.nodes.values()
                    .filter(|node| node.is_online())
                    .count();
                online_nodes > self.nodes.len() / 2
            },
            _ => false,
        }
    }
    
    fn get_summary(&self) -> String {
        // Fix: the original sliced `&self.cluster_id[..8]`, which panics
        // when the id is shorter than 8 bytes or byte 8 is not a char
        // boundary. `get(..8)` falls back to the full id instead.
        let short_id = self.cluster_id.get(..8).unwrap_or(&self.cluster_id);
        format!(
            "Cluster {} (epoch: {}, nodes: {}, state: {:?})",
            short_id,
            self.current_epoch,
            self.nodes.len(),
            self.state
        )
    }
}

#[async_trait]
impl StateManagerTrait for ClusterStateManager {
    type State = ClusterState;
    
    async fn get_state(&self) -> &Self::State {
        // NOTE(review): not implementable as declared — a reference into the
        // RwLock cannot outlive its guard, so this trait method needs a
        // redesign (return a guard, a clone, or an Arc) for async use.
        unimplemented!("StateManager trait需要重新设计以适配异步环境")
    }
    
    async fn get_state_mut(&mut self) -> &mut Self::State {
        // Same guard-lifetime problem as `get_state`.
        unimplemented!("StateManager trait需要重新设计以适配异步环境")
    }
    
    // Applies `updater` under the write lock and propagates its result.
    async fn update_state<F>(&mut self, updater: F) -> ClusterResult<()>
    where
        F: FnOnce(&mut Self::State) -> ClusterResult<()> + Send
    {
        let mut state = self.state.write().await;
        updater(&mut state)
    }

    // Persists the state; `save_to_file` is currently a stub that always
    // succeeds.
    async fn save_state(&self) -> ClusterResult<()> {
        self.save_to_file().await
            .map_err(|e| ClusterError::Config(format!("保存状态失败: {e}")))
    }
    
    // Restores the state; `load_from_file` is currently a stub that always
    // succeeds.
    async fn load_state(&mut self) -> ClusterResult<()> {
        self.load_from_file().await
            .map_err(|e| ClusterError::Config(format!("加载状态失败: {e}")))
    }

    // The configuration epoch doubles as the state version number.
    async fn get_state_version(&self) -> u64 {
        let state = self.state.read().await;
        state.current_epoch
    }
    
    // Two states are considered consistent when their epochs match exactly.
    async fn is_state_consistent(&self, other_version: u64) -> bool {
        let state = self.state.read().await;
        state.current_epoch == other_version
    }
}

impl ClusterStateManager {
    /// Persists the cluster state to disk.
    ///
    /// NOTE(review): stub — always returns `Ok(())`. The target path should
    /// come from `ClusterConfig` once implemented.
    async fn save_to_file(&self) -> Result<(), String> {
        Ok(())
    }
    
    /// Loads the cluster state from disk.
    ///
    /// NOTE(review): stub — always returns `Ok(())` and leaves the current
    /// state untouched.
    async fn load_from_file(&mut self) -> Result<(), String> {
        Ok(())
    }
}

