//! 集群数据同步模块

use crate::manager::ClusterManager;
use nacos_core::{Result, NacosError};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use chrono::{DateTime, Utc};
use tracing::{info, warn};
use uuid::Uuid;

/// Kind of data synchronization exchanged between cluster nodes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SyncType {
    /// Full snapshot synchronization of all local data.
    Full,
    /// Incremental synchronization of a single change.
    Incremental,
    /// Heartbeat synchronization.
    Heartbeat,
    /// Conflict-resolution synchronization.
    ConflictResolution,
}

/// Operation carried by a sync message.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum DataOperation {
    /// Create a new entry.
    Create,
    /// Update an existing entry.
    Update,
    /// Delete an entry.
    Delete,
    /// Batch of operations.
    Batch,
}

/// A single data-synchronization message exchanged between nodes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncMessage {
    /// Unique message ID (UUID v4).
    pub message_id: String,
    /// ID of the node that produced the message.
    pub source_node_id: String,
    /// Target node ID; `None` means the message is not addressed to a
    /// specific node.
    pub target_node_id: Option<String>,
    /// Kind of synchronization this message performs.
    pub sync_type: SyncType,
    /// Data namespace.
    pub namespace: String,
    /// Data group.
    pub group: String,
    /// Data key.
    pub data_key: String,
    /// Payload bytes; `None` for operations without a payload (e.g. delete).
    pub data_value: Option<Vec<u8>>,
    /// Version of the data this message refers to.
    pub version: u64,
    /// Creation timestamp (UTC).
    pub timestamp: DateTime<Utc>,
    /// Operation to apply on the receiving side.
    pub operation: DataOperation,
    /// Checksum over the message contents, as produced by
    /// `SyncMessage::calculate_checksum`.
    pub checksum: u64,
}

impl SyncMessage {
    /// Create a new sync message with a fresh UUID, version 1 and the
    /// current UTC timestamp.
    ///
    /// The checksum is computed immediately so that messages that never
    /// receive a payload (e.g. delete messages) still pass
    /// `verify_checksum`. Previously it was left at 0, which made every
    /// payload-less message fail verification on the receiving side.
    pub fn new(
        source_node_id: String,
        namespace: String,
        group: String,
        data_key: String,
        sync_type: SyncType,
        operation: DataOperation,
    ) -> Self {
        let message_id = Uuid::new_v4().to_string();
        let mut message = Self {
            message_id,
            source_node_id,
            target_node_id: None,
            sync_type,
            namespace,
            group,
            data_key,
            data_value: None,
            version: 1,
            timestamp: Utc::now(),
            operation,
            checksum: 0,
        };
        message.checksum = message.calculate_checksum();
        message
    }

    /// Compute the checksum over the identifying fields, payload, version
    /// and timestamp.
    ///
    /// `target_node_id`, `sync_type` and `operation` are not part of the
    /// hash, so builder methods that only change those fields do not
    /// invalidate an existing checksum.
    pub fn calculate_checksum(&self) -> u64 {
        use std::hash::{Hash, Hasher};
        use std::collections::hash_map::DefaultHasher;

        let mut hasher = DefaultHasher::new();
        self.message_id.hash(&mut hasher);
        self.source_node_id.hash(&mut hasher);
        self.namespace.hash(&mut hasher);
        self.group.hash(&mut hasher);
        self.data_key.hash(&mut hasher);
        if let Some(ref data) = self.data_value {
            data.hash(&mut hasher);
        }
        self.version.hash(&mut hasher);
        self.timestamp.to_rfc3339().hash(&mut hasher);
        hasher.finish()
    }

    /// Verify that the stored checksum matches the current contents.
    pub fn verify_checksum(&self) -> bool {
        self.checksum == self.calculate_checksum()
    }

    /// Attach a data payload and refresh the checksum.
    pub fn with_data(mut self, data: Vec<u8>) -> Self {
        self.data_value = Some(data);
        self.checksum = self.calculate_checksum();
        self
    }

    /// Address the message to a specific node and refresh the checksum.
    pub fn with_target(mut self, target_node_id: String) -> Self {
        self.target_node_id = Some(target_node_id);
        self.checksum = self.calculate_checksum();
        self
    }
}

/// State of a synchronization attempt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SyncStatus {
    /// Waiting to be synchronized.
    Pending,
    /// Synchronization in progress.
    Syncing,
    /// Synchronization finished successfully.
    Completed,
    /// Synchronization failed; the payload carries the error message.
    Failed(String),
    /// A version conflict is awaiting resolution.
    Conflict,
}

/// Version metadata attached to each locally stored data entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DataVersion {
    /// Monotonically increasing version number.
    pub version: u64,
    /// Time of the last update (UTC).
    pub last_updated: DateTime<Utc>,
    /// ID of the node that performed the last update.
    pub last_updated_by: String,
    /// Checksum of the stored payload bytes.
    pub checksum: u64,
}

/// Bookkeeping record tracking the lifecycle of one sync message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncRecord {
    /// Unique record ID (UUID v4).
    pub record_id: String,
    /// ID of the sync message this record tracks.
    pub message_id: String,
    /// Current synchronization status.
    pub status: SyncStatus,
    /// Number of retries performed so far.
    pub retry_count: u32,
    /// Creation time (UTC).
    pub created_at: DateTime<Utc>,
    /// Last update time (UTC).
    pub updated_at: DateTime<Utc>,
    /// Error message of the last failure, if any.
    pub error_message: Option<String>,
}

impl SyncRecord {
    /// Build a pending record for the given message, stamped with the
    /// current time and a freshly generated record ID.
    pub fn new(message_id: String) -> Self {
        let created = Utc::now();
        Self {
            record_id: Uuid::new_v4().to_string(),
            message_id,
            status: SyncStatus::Pending,
            retry_count: 0,
            created_at: created,
            updated_at: created,
            error_message: None,
        }
    }
}

/// Coordinates replication of key/value data across cluster nodes.
#[derive(Debug)]
pub struct DataSyncManager {
    /// Cluster membership / health source.
    cluster_manager: Arc<ClusterManager>,
    /// Local store, keyed namespace -> group -> data key, holding the
    /// payload bytes together with their version metadata.
    local_data: Arc<RwLock<HashMap<String, HashMap<String, HashMap<String, (Vec<u8>, DataVersion)>>>>>,
    /// Sync records keyed by record ID.
    sync_records: Arc<RwLock<HashMap<String, SyncRecord>>>,
    /// Queue of messages waiting to be sent.
    pending_sync: Arc<RwLock<Vec<SyncMessage>>>,
    /// Maximum retry count per sync record.
    max_retry_count: u32,
    /// Batch size used for bulk synchronization.
    batch_size: usize,
}

impl DataSyncManager {
    /// Create a new data sync manager.
    ///
    /// * `cluster_manager` - source of cluster membership and node health.
    /// * `max_retry_count` - maximum retries per sync record.
    /// * `batch_size` - batch size used for bulk synchronization.
    pub fn new(cluster_manager: Arc<ClusterManager>, max_retry_count: u32, batch_size: usize) -> Self {
        Self {
            cluster_manager,
            local_data: Arc::new(RwLock::new(HashMap::new())),
            sync_records: Arc::new(RwLock::new(HashMap::new())),
            pending_sync: Arc::new(RwLock::new(Vec::new())),
            max_retry_count,
            batch_size,
        }
    }

    /// Store data locally and enqueue an incremental update message.
    pub async fn store_data(
        &self,
        namespace: String,
        group: String,
        data_key: String,
        data: Vec<u8>,
        source_node_id: String,
    ) -> Result<()> {
        let version = self.get_next_version(&namespace, &group, &data_key).await;
        let checksum = self.calculate_data_checksum(&data);

        let data_version = DataVersion {
            version,
            last_updated: Utc::now(),
            last_updated_by: source_node_id.clone(),
            checksum,
        };

        // Store locally; the write lock is scoped so it is released before
        // the message is queued.
        {
            let mut local_data = self.local_data.write().await;
            local_data
                .entry(namespace.clone())
                .or_insert_with(HashMap::new)
                .entry(group.clone())
                .or_insert_with(HashMap::new)
                .insert(data_key.clone(), (data.clone(), data_version.clone()));
        }

        // The outgoing message must carry the new version. Previously it
        // kept the constructor default of 1, so peers that already held
        // version >= 1 treated every update as a conflict. The version is
        // set before `with_data` so the checksum covers the final contents.
        let mut message = SyncMessage::new(
            source_node_id,
            namespace,
            group,
            data_key,
            SyncType::Incremental,
            DataOperation::Update,
        );
        message.version = version;
        let message = message.with_data(data);

        self.queue_sync_message(message).await;

        Ok(())
    }

    /// Look up data locally, returning the payload and its version metadata.
    pub async fn get_data(
        &self,
        namespace: &str,
        group: &str,
        data_key: &str,
    ) -> Result<Option<(Vec<u8>, DataVersion)>> {
        let local_data = self.local_data.read().await;
        Ok(local_data
            .get(namespace)
            .and_then(|ns| ns.get(group))
            .and_then(|grp| grp.get(data_key))
            .cloned())
    }

    /// Delete data locally and enqueue an incremental delete message.
    pub async fn delete_data(
        &self,
        namespace: String,
        group: String,
        data_key: String,
        source_node_id: String,
    ) -> Result<()> {
        // Capture the next version BEFORE removing the entry — afterwards
        // the lookup would fall back to 1 and peers holding a newer version
        // would reject the delete as stale.
        let version = self.get_next_version(&namespace, &group, &data_key).await;

        // Remove from the local store.
        {
            let mut local_data = self.local_data.write().await;
            if let Some(ns) = local_data.get_mut(&namespace) {
                if let Some(grp) = ns.get_mut(&group) {
                    grp.remove(&data_key);
                }
            }
        }

        let mut message = SyncMessage::new(
            source_node_id,
            namespace,
            group,
            data_key,
            SyncType::Incremental,
            DataOperation::Delete,
        );
        message.version = version;
        // Refresh the checksum after mutating `version`; otherwise
        // `verify_checksum` fails on the receiving side and the delete is
        // silently dropped.
        message.checksum = message.calculate_checksum();

        self.queue_sync_message(message).await;

        Ok(())
    }

    /// Next version number for a key: stored version + 1, or 1 for new keys.
    async fn get_next_version(&self, namespace: &str, group: &str, data_key: &str) -> u64 {
        let local_data = self.local_data.read().await;
        local_data
            .get(namespace)
            .and_then(|ns| ns.get(group))
            .and_then(|grp| grp.get(data_key))
            .map(|(_, version)| version.version + 1)
            .unwrap_or(1)
    }

    /// Checksum of a raw payload using the std `DefaultHasher`.
    fn calculate_data_checksum(&self, data: &[u8]) -> u64 {
        use std::hash::{Hash, Hasher};
        use std::collections::hash_map::DefaultHasher;

        let mut hasher = DefaultHasher::new();
        data.hash(&mut hasher);
        hasher.finish()
    }

    /// Append a message to the pending-sync queue.
    async fn queue_sync_message(&self, message: SyncMessage) {
        let mut pending = self.pending_sync.write().await;
        pending.push(message);
    }

    /// Validate an incoming sync message and apply it, resolving version
    /// conflicts when the local copy is at least as new.
    pub async fn process_sync_message(&self, message: SyncMessage) -> Result<()> {
        // Reject corrupted or tampered messages up front.
        if !message.verify_checksum() {
            return Err(NacosError::Validation("Invalid message checksum".to_string()));
        }

        // A local version >= the incoming version is a conflict.
        if let Some(existing) = self
            .get_data(&message.namespace, &message.group, &message.data_key)
            .await?
        {
            if existing.1.version >= message.version {
                return self.resolve_conflict(&message, &existing.1).await;
            }
        }

        self.apply_sync_message(message).await
    }

    /// Resolve a version conflict: last-writer-wins by timestamp, with the
    /// lexicographically greater node ID as a deterministic tie-breaker.
    async fn resolve_conflict(&self, message: &SyncMessage, existing: &DataVersion) -> Result<()> {
        if message.timestamp > existing.last_updated {
            // Incoming data is newer — apply it.
            warn!("Resolving conflict: using newer data from {}", message.source_node_id);
            return self.apply_sync_message(message.clone()).await;
        } else if message.timestamp < existing.last_updated {
            // Incoming data is older — keep the local copy.
            info!("Ignoring outdated data from {}", message.source_node_id);
        } else {
            // Identical timestamps: break the tie by node ID so every node
            // reaches the same decision.
            if message.source_node_id > existing.last_updated_by {
                return self.apply_sync_message(message.clone()).await;
            }
        }

        Ok(())
    }

    /// Apply a sync message to the local store (no further conflict checks,
    /// avoiding recursion with `process_sync_message`).
    async fn apply_sync_message(&self, message: SyncMessage) -> Result<()> {
        // Re-verify: this method is also reachable from the conflict path.
        if !message.verify_checksum() {
            return Err(NacosError::Validation("Invalid message checksum".to_string()));
        }

        match message.operation {
            DataOperation::Create | DataOperation::Update => {
                if let Some(data) = &message.data_value {
                    let data_version = DataVersion {
                        version: message.version,
                        last_updated: message.timestamp,
                        last_updated_by: message.source_node_id.clone(),
                        checksum: self.calculate_data_checksum(data),
                    };

                    let mut local_data = self.local_data.write().await;
                    local_data
                        .entry(message.namespace.clone())
                        .or_insert_with(HashMap::new)
                        .entry(message.group.clone())
                        .or_insert_with(HashMap::new)
                        .insert(message.data_key.clone(), (data.clone(), data_version));
                }
            }
            DataOperation::Delete => {
                let mut local_data = self.local_data.write().await;
                if let Some(ns) = local_data.get_mut(&message.namespace) {
                    if let Some(grp) = ns.get_mut(&message.group) {
                        grp.remove(&message.data_key);
                    }
                }
            }
            DataOperation::Batch => {
                // Batch application is not implemented yet. Return an error
                // instead of panicking (the previous `unimplemented!`) so a
                // single bad message cannot take down the node.
                warn!("Batch sync operations are not supported yet");
                return Err(NacosError::Validation(
                    "Batch operations not yet implemented".to_string(),
                ));
            }
        }

        Ok(())
    }

    /// Fan a message out to every healthy peer (the current node is skipped).
    pub async fn sync_to_all_nodes(&self, message: SyncMessage) -> Result<()> {
        let healthy_nodes = self.cluster_manager.get_healthy_nodes().await;
        let current_node = self.cluster_manager.get_current_node().await;

        for node in healthy_nodes {
            if let Some(ref current) = current_node {
                if node.node_id == current.node_id {
                    continue; // Never sync to ourselves.
                }
            }

            let target_message = message.clone().with_target(node.node_id.clone());
            self.queue_sync_message(target_message).await;
        }

        Ok(())
    }

    /// Push a full snapshot of the local data, either to one target node or
    /// to every healthy peer when `target_node_id` is `None`.
    pub async fn perform_full_sync(&self, target_node_id: Option<String>) -> Result<()> {
        // Resolve our node ID once instead of once per data entry
        // (previously fetched inside the innermost loop).
        let source_node_id = self
            .cluster_manager
            .get_current_node()
            .await
            .map(|n| n.node_id)
            .unwrap_or_else(|| "unknown".to_string());

        let local_data = self.local_data.read().await;

        for (namespace, groups) in local_data.iter() {
            for (group, data_map) in groups.iter() {
                for (data_key, (data, version)) in data_map.iter() {
                    let mut message = SyncMessage::new(
                        source_node_id.clone(),
                        namespace.clone(),
                        group.clone(),
                        data_key.clone(),
                        SyncType::Full,
                        DataOperation::Update,
                    );
                    // Carry the stored version so receivers can compare it
                    // against their own copy (previously the default of 1
                    // was sent, discarding all version information).
                    message.version = version.version;
                    let message = message.with_data(data.clone());

                    if let Some(ref target) = target_node_id {
                        let target_message = message.with_target(target.clone());
                        self.queue_sync_message(target_message).await;
                    } else {
                        self.sync_to_all_nodes(message).await?;
                    }
                }
            }
        }

        Ok(())
    }

    /// Snapshot the sync statistics: queued messages plus per-record counts.
    pub async fn get_sync_stats(&self) -> SyncStats {
        let pending = self.pending_sync.read().await;
        let records = self.sync_records.read().await;

        // Pending counts both queued messages and records that are neither
        // completed nor failed.
        let mut pending_count = pending.len();
        let mut completed_count = 0;
        let mut failed_count = 0;

        for record in records.values() {
            match record.status {
                SyncStatus::Completed => completed_count += 1,
                SyncStatus::Failed(_) => failed_count += 1,
                _ => pending_count += 1,
            }
        }

        SyncStats {
            pending_sync_count: pending_count,
            completed_sync_count: completed_count,
            failed_sync_count: failed_count,
            total_records: records.len(),
        }
    }

    /// Drop sync records older than `max_age_hours`.
    pub async fn cleanup_expired_records(&self, max_age_hours: i64) -> Result<()> {
        let mut records = self.sync_records.write().await;
        let cutoff = Utc::now() - chrono::Duration::hours(max_age_hours);

        records.retain(|_, record| record.created_at > cutoff);
        Ok(())
    }
}

/// Aggregate synchronization statistics reported by `get_sync_stats`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncStats {
    /// Queued messages plus records that are neither completed nor failed.
    pub pending_sync_count: usize,
    /// Records that completed successfully.
    pub completed_sync_count: usize,
    /// Records that failed.
    pub failed_sync_count: usize,
    /// Total number of sync records.
    pub total_records: usize,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::manager::{ClusterConfig, ClusterManager};

    /// Build a sync manager backed by a default cluster configuration.
    fn make_manager() -> DataSyncManager {
        let cluster = Arc::new(ClusterManager::new(ClusterConfig::default()));
        DataSyncManager::new(cluster, 3, 100)
    }

    #[tokio::test]
    async fn test_sync_message_creation() {
        // A message with a payload must carry the fields it was built from
        // and a checksum consistent with its contents.
        let msg = SyncMessage::new(
            "node1".to_string(),
            "public".to_string(),
            "DEFAULT_GROUP".to_string(),
            "test-key".to_string(),
            SyncType::Incremental,
            DataOperation::Update,
        )
        .with_data(b"test data".to_vec());

        assert_eq!(msg.namespace, "public");
        assert_eq!(msg.group, "DEFAULT_GROUP");
        assert_eq!(msg.data_key, "test-key");
        assert!(msg.verify_checksum());
    }

    #[tokio::test]
    async fn test_data_storage_and_retrieval() {
        let manager = make_manager();
        let payload = b"test data".to_vec();

        manager
            .store_data(
                "public".to_string(),
                "DEFAULT_GROUP".to_string(),
                "test-key".to_string(),
                payload.clone(),
                "node1".to_string(),
            )
            .await
            .unwrap();

        // The stored bytes must come back unchanged.
        let found = manager
            .get_data("public", "DEFAULT_GROUP", "test-key")
            .await
            .unwrap();
        assert!(found.is_some());
        assert_eq!(found.unwrap().0, payload);
    }

    #[tokio::test]
    async fn test_version_conflict_resolution() {
        let manager = make_manager();

        // Seed initial data.
        manager
            .store_data(
                "public".to_string(),
                "DEFAULT_GROUP".to_string(),
                "test-key".to_string(),
                b"initial".to_vec(),
                "node1".to_string(),
            )
            .await
            .unwrap();

        // Simulate a conflicting update for the same key from another node;
        // conflict resolution must succeed rather than error out.
        let conflict = SyncMessage::new(
            "node2".to_string(),
            "public".to_string(),
            "DEFAULT_GROUP".to_string(),
            "test-key".to_string(),
            SyncType::Incremental,
            DataOperation::Update,
        )
        .with_data(b"conflict".to_vec());

        assert!(manager.process_sync_message(conflict).await.is_ok());
    }
}