// Distributed storage implementation with Redis clustering and sharding
use crate::redis::{RedisStorage, RedisConfig};
use codegraph_core::{
    Result, CodeGraphError, GraphStorage, Transaction, GraphQuery, QueryResult,
    CodeNode, CodeEdge, NodeId, EdgeId, EdgeType,
};
use async_trait::async_trait;
use std::collections::{HashMap, hash_map::DefaultHasher};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use tokio::sync::RwLock;
use serde::{Serialize, Deserialize};

/// Configuration for distributed storage
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DistributedConfig {
    /// List of Redis nodes for sharding; one shard is created per entry.
    pub shard_nodes: Vec<RedisConfig>,
    /// Replication factor: total number of copies of each record,
    /// counting the primary (a factor of 3 means 1 primary + 2 replicas).
    pub replication_factor: usize,
    /// Consistency level for reads
    pub read_consistency: ConsistencyLevel,
    /// Consistency level for writes
    pub write_consistency: ConsistencyLevel,
    /// Enable automatic failover
    pub auto_failover: bool,
    /// Health check interval in seconds
    pub health_check_interval: u64,
}

/// How many replica shards must acknowledge an operation before it is
/// considered successful.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ConsistencyLevel {
    /// Read/write from any available node (a single success suffices)
    Any,
    /// Read/write from majority of nodes (floor(n/2) + 1 successes)
    Majority,
    /// Read/write from all nodes
    All,
}

impl Default for DistributedConfig {
    /// Sensible single-node defaults: one local shard, three copies of each
    /// record, majority consistency for reads and writes, automatic failover
    /// enabled, and a health check every 30 seconds.
    fn default() -> Self {
        let shard_nodes = vec![RedisConfig::default()];
        Self {
            shard_nodes,
            replication_factor: 3,
            auto_failover: true,
            health_check_interval: 30,
            read_consistency: ConsistencyLevel::Majority,
            write_consistency: ConsistencyLevel::Majority,
        }
    }
}

/// Distributed storage implementation with sharding and replication
pub struct DistributedStorage {
    /// Cluster-wide settings (replication factor, consistency levels, …).
    config: DistributedConfig,
    /// All known shards; health flags are updated by `health_check`.
    shards: Arc<RwLock<Vec<ShardInfo>>>,
    /// Consistent-hash ring mapping keys to indices into `shards`.
    hash_ring: Arc<RwLock<ConsistentHashRing>>,
}

/// Information about a storage shard
#[derive(Debug, Clone)]
struct ShardInfo {
    /// Index of this shard (position in the shard list and ring id).
    id: usize,
    /// Primary connection; all reads/writes currently go through it.
    primary: Arc<RedisStorage>,
    /// Replica connections (currently built from the same config as primary).
    replicas: Vec<Arc<RedisStorage>>,
    /// Last observed health; unhealthy shards are skipped by operations.
    is_healthy: bool,
    /// When `is_healthy` was last refreshed.
    last_health_check: std::time::Instant,
}

/// Consistent hashing ring for data distribution.
///
/// Each shard owns `virtual_nodes` points on a 64-bit hash ring; a key is
/// assigned to the first shard point clockwise from the key's hash. Virtual
/// nodes keep the key distribution even across shards.
#[derive(Debug)]
struct ConsistentHashRing {
    /// hash -> shard_id. BTreeMap keeps points ordered so clockwise
    /// lookups are simple range queries.
    ring: std::collections::BTreeMap<u64, usize>,
    /// Number of virtual nodes created per shard.
    virtual_nodes: usize,
}

impl ConsistentHashRing {
    /// Build a ring with `virtual_nodes` points for each of `shard_count`
    /// shards. With `shard_count == 0` the ring is empty and `get_shard`
    /// falls back to shard 0.
    fn new(shard_count: usize, virtual_nodes: usize) -> Self {
        let mut ring = std::collections::BTreeMap::new();

        // Create virtual nodes for each shard; the key format is stable so
        // the ring layout is reproducible for a given shard count.
        for shard_id in 0..shard_count {
            for vnode in 0..virtual_nodes {
                let key = format!("shard-{}-vnode-{}", shard_id, vnode);
                ring.insert(Self::hash_key(&key), shard_id);
            }
        }

        Self {
            ring,
            virtual_nodes,
        }
    }

    /// Hash a key to a position on the ring.
    ///
    /// NOTE(review): `DefaultHasher` is deterministic within one process but
    /// its algorithm is not guaranteed stable across Rust releases; if the
    /// ring layout must survive binary upgrades, switch to an explicitly
    /// versioned hash function.
    fn hash_key(key: &str) -> u64 {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        hasher.finish()
    }

    /// Return the shard owning `key`: the first ring point at or after the
    /// key's hash, wrapping to the start of the ring when the key hashes
    /// past the last point. Returns 0 for an empty ring.
    fn get_shard(&self, key: &str) -> usize {
        let hash = Self::hash_key(key);
        self.ring
            .range(hash..)
            .next()
            .or_else(|| self.ring.iter().next()) // wrap around to ring start
            .map(|(_, &shard_id)| shard_id)
            .unwrap_or(0)
    }

    /// Return up to `count` distinct shard ids for `key`, primary first.
    ///
    /// Walks the ring clockwise from the key's hash — skipping the primary's
    /// own point — then wraps to the beginning. The two arcs
    /// (`range(hash..)` then `range(..hash)`) chained together cover the
    /// whole ring exactly once, replacing the previous duplicated
    /// forward-then-wrap loops.
    fn get_replica_shards(&self, key: &str, count: usize) -> Vec<usize> {
        let primary_shard = self.get_shard(key);
        let hash = Self::hash_key(key);

        let mut shards = vec![primary_shard];
        let mut seen = std::collections::HashSet::new();
        seen.insert(primary_shard);

        let walk = self
            .ring
            .range(hash..)
            .skip(1) // first point on this arc is the primary's virtual node
            .chain(self.ring.range(..hash));

        for (_, &shard_id) in walk {
            if shards.len() >= count {
                break;
            }
            // HashSet::insert returns false for shards we already picked
            // (other virtual nodes of the same shard).
            if seen.insert(shard_id) {
                shards.push(shard_id);
            }
        }

        shards
    }
}

impl DistributedStorage {
    /// Create a new distributed storage system.
    ///
    /// Builds one shard per entry in `config.shard_nodes`, with
    /// `replication_factor - 1` replica connections alongside each primary,
    /// then constructs a consistent-hash ring over the shards.
    ///
    /// # Errors
    /// Returns any error raised while connecting the underlying
    /// `RedisStorage` instances.
    pub async fn new(config: DistributedConfig) -> Result<Self> {
        let mut shards = Vec::with_capacity(config.shard_nodes.len());

        // Initialize shards
        for (i, shard_config) in config.shard_nodes.iter().enumerate() {
            let primary = Arc::new(RedisStorage::new(shard_config.clone()).await?);
            let mut replicas = Vec::new();

            // Create replicas (for now, just use the same config).
            // NOTE(review): in a real deployment each replica should point at
            // a different node; reusing the primary's config gives no actual
            // redundancy — confirm before relying on replication.
            for _ in 1..config.replication_factor {
                let replica = Arc::new(RedisStorage::new(shard_config.clone()).await?);
                replicas.push(replica);
            }

            shards.push(ShardInfo {
                id: i,
                primary,
                replicas,
                is_healthy: true, // assume healthy until the first health check
                last_health_check: std::time::Instant::now(),
            });
        }

        // 150 virtual nodes per shard keeps key distribution even.
        let hash_ring = ConsistentHashRing::new(shards.len(), 150);

        Ok(Self {
            config,
            shards: Arc::new(RwLock::new(shards)),
            hash_ring: Arc::new(RwLock::new(hash_ring)),
        })
    }

    /// Get the id of the shard that owns `key`.
    pub async fn get_shard_for_key(&self, key: &str) -> Result<usize> {
        let hash_ring = self.hash_ring.read().await;
        Ok(hash_ring.get_shard(key))
    }

    /// Get the shard ids holding replicas of `key` (primary first), up to
    /// the configured replication factor.
    async fn get_replica_shards_for_key(&self, key: &str) -> Result<Vec<usize>> {
        let hash_ring = self.hash_ring.read().await;
        Ok(hash_ring.get_replica_shards(key, self.config.replication_factor))
    }

    /// Execute `operation` on the replica shards of `key`, succeeding once
    /// the number of successful shard operations satisfies `consistency`.
    /// Returns the first successful result.
    ///
    /// NOTE(review): tasks still in flight when the requirement is met are
    /// detached (their `JoinHandle`s are dropped but the spawned futures keep
    /// running), and unhealthy shards are skipped without lowering
    /// `required_success` — so the requirement can be unsatisfiable when too
    /// few healthy shards exist.
    async fn execute_on_shards<T, F, Fut>(
        &self,
        key: &str,
        consistency: &ConsistencyLevel,
        operation: F,
    ) -> Result<T>
    where
        F: Fn(Arc<RedisStorage>) -> Fut + Send + Sync + Clone + 'static,
        Fut: std::future::Future<Output = Result<T>> + Send + 'static,
        T: Send + 'static,
    {
        let shard_ids = self.get_replica_shards_for_key(key).await?;
        let shards = self.shards.read().await;

        // How many shard-level successes this consistency level demands.
        let required_success = match consistency {
            ConsistencyLevel::Any => 1,
            ConsistencyLevel::Majority => (shard_ids.len() / 2) + 1,
            ConsistencyLevel::All => shard_ids.len(),
        };

        // Fan the operation out to every healthy replica shard.
        let mut tasks = Vec::new();
        for &shard_id in &shard_ids {
            if let Some(shard) = shards.get(shard_id) {
                if shard.is_healthy {
                    let storage = shard.primary.clone();
                    let op = operation.clone();
                    tasks.push(tokio::spawn(async move { op(storage).await }));
                }
            }
        }

        let mut success_count = 0;
        let mut first_result = None;

        // Await results in spawn order, stopping as soon as the consistency
        // requirement is satisfied.
        for task in tasks {
            match task.await {
                Ok(Ok(result)) => {
                    success_count += 1;
                    if first_result.is_none() {
                        first_result = Some(result);
                    }
                    if success_count >= required_success {
                        break;
                    }
                }
                Ok(Err(_)) | Err(_) => {
                    // Tolerate individual shard failures; the final tally
                    // decides whether the whole operation succeeded.
                    continue;
                }
            }
        }

        if success_count >= required_success {
            first_result.ok_or_else(|| {
                CodeGraphError::storage_error("No successful operations despite meeting consistency requirements".to_string())
            })
        } else {
            Err(CodeGraphError::storage_error(format!(
                "Failed to meet consistency requirements: {}/{} successful operations",
                success_count, required_success
            )))
        }
    }

    /// Perform a health check on all shards, updating each shard's
    /// `is_healthy` flag and `last_health_check` timestamp.
    ///
    /// Returns a map of shard id -> health status.
    pub async fn health_check(&self) -> Result<HashMap<usize, bool>> {
        let mut health_status = HashMap::new();
        let mut shards = self.shards.write().await;

        for shard in shards.iter_mut() {
            // Simple ping test: any successful round-trip counts as healthy,
            // regardless of whether the probe key exists.
            let is_healthy = shard.primary.exists("health_check").await.is_ok();

            shard.is_healthy = is_healthy;
            shard.last_health_check = std::time::Instant::now();
            health_status.insert(shard.id, is_healthy);
        }

        Ok(health_status)
    }

    /// Get cluster statistics: shard counts plus the configured replication
    /// factor and read-consistency level.
    pub async fn get_cluster_stats(&self) -> Result<ClusterStats> {
        let shards = self.shards.read().await;
        let total_shards = shards.len();
        let healthy_shards = shards.iter().filter(|s| s.is_healthy).count();

        Ok(ClusterStats {
            total_shards,
            healthy_shards,
            replication_factor: self.config.replication_factor,
            consistency_level: self.config.read_consistency.clone(),
        })
    }
}

/// Cluster statistics
#[derive(Debug, Serialize, Deserialize)]
pub struct ClusterStats {
    /// Total number of configured shards.
    pub total_shards: usize,
    /// Shards whose last health check succeeded.
    pub healthy_shards: usize,
    /// Configured copies per record (counting the primary).
    pub replication_factor: usize,
    /// The configured *read* consistency level.
    pub consistency_level: ConsistencyLevel,
}

// Implement GraphStorage trait for DistributedStorage
#[async_trait]
impl GraphStorage for DistributedStorage {
    async fn save_node(&self, node: &dyn CodeNode) -> Result<()> {
        let key = format!("node:{}", node.id());
        let shard_id = self.get_shard_for_key(&key).await?;

        let shards = self.shards.read().await;
        if let Some(shard) = shards.get(shard_id) {
            if shard.is_healthy {
                shard.primary.save_node(node).await?;
            } else {
                return Err(CodeGraphError::storage_error("Primary shard is unhealthy".to_string()));
            }
        } else {
            return Err(CodeGraphError::storage_error("Shard not found".to_string()));
        }

        Ok(())
    }

    async fn get_node(&self, id: NodeId) -> Result<Option<Box<dyn CodeNode>>> {
        let key = format!("node:{}", id);
        let shard_id = self.get_shard_for_key(&key).await?;

        let shards = self.shards.read().await;
        if let Some(shard) = shards.get(shard_id) {
            if shard.is_healthy {
                shard.primary.get_node(id).await
            } else {
                Err(CodeGraphError::storage_error("Primary shard is unhealthy".to_string()))
            }
        } else {
            Err(CodeGraphError::storage_error("Shard not found".to_string()))
        }
    }

    async fn delete_node(&self, id: NodeId) -> Result<bool> {
        let key = format!("node:{}", id);
        let shard_id = self.get_shard_for_key(&key).await?;

        let shards = self.shards.read().await;
        if let Some(shard) = shards.get(shard_id) {
            if shard.is_healthy {
                shard.primary.delete_node(id).await
            } else {
                Err(CodeGraphError::storage_error("Primary shard is unhealthy".to_string()))
            }
        } else {
            Err(CodeGraphError::storage_error("Shard not found".to_string()))
        }
    }

    async fn save_edge(&self, edge: &dyn CodeEdge) -> Result<()> {
        let key = format!("edge:{}", edge.id());
        let shard_id = self.get_shard_for_key(&key).await?;

        let shards = self.shards.read().await;
        if let Some(shard) = shards.get(shard_id) {
            if shard.is_healthy {
                shard.primary.save_edge(edge).await?;
            } else {
                return Err(CodeGraphError::storage_error("Primary shard is unhealthy".to_string()));
            }
        } else {
            return Err(CodeGraphError::storage_error("Shard not found".to_string()));
        }

        Ok(())
    }

    async fn get_edges(&self, node_id: NodeId, edge_type: Option<&EdgeType>) -> Result<Vec<Box<dyn CodeEdge>>> {
        let key = format!("node:{}:edges", node_id);
        let shard_id = self.get_shard_for_key(&key).await?;

        let shards = self.shards.read().await;
        if let Some(shard) = shards.get(shard_id) {
            if shard.is_healthy {
                shard.primary.get_edges(node_id, edge_type).await
            } else {
                Err(CodeGraphError::storage_error("Primary shard is unhealthy".to_string()))
            }
        } else {
            Err(CodeGraphError::storage_error("Shard not found".to_string()))
        }
    }

    async fn delete_edge(&self, id: EdgeId) -> Result<bool> {
        let key = format!("edge:{}", id);
        let shard_id = self.get_shard_for_key(&key).await?;

        let shards = self.shards.read().await;
        if let Some(shard) = shards.get(shard_id) {
            if shard.is_healthy {
                shard.primary.delete_edge(id).await
            } else {
                Err(CodeGraphError::storage_error("Primary shard is unhealthy".to_string()))
            }
        } else {
            Err(CodeGraphError::storage_error("Shard not found".to_string()))
        }
    }

    async fn save_batch(
        &self,
        nodes: &[&dyn CodeNode],
        edges: &[&dyn CodeEdge],
    ) -> Result<()> {
        // For simplicity, just save each node and edge individually
        // In a real implementation, we'd optimize this with proper batching

        for node in nodes {
            self.save_node(*node).await?;
        }

        for edge in edges {
            self.save_edge(*edge).await?;
        }

        Ok(())
    }

    async fn query(&self, query: &GraphQuery) -> Result<QueryResult> {
        // For distributed queries, we need to query all shards and merge results
        let shards = self.shards.read().await;
        let mut tasks = Vec::new();

        for shard in shards.iter() {
            if shard.is_healthy {
                let storage = shard.primary.clone();
                let query_clone = query.clone();
                tasks.push(tokio::spawn(async move {
                    storage.query(&query_clone).await
                }));
            }
        }

        // Collect and merge results
        let mut merged_result = QueryResult {
            nodes: Vec::new(),
            edges: Vec::new(),
            metadata: HashMap::new(),
            execution_time_ms: 0,
        };

        let mut max_execution_time = 0;

        for task in tasks {
            match task.await {
                Ok(Ok(result)) => {
                    merged_result.nodes.extend(result.nodes);
                    merged_result.edges.extend(result.edges);
                    max_execution_time = max_execution_time.max(result.execution_time_ms);

                    // Merge metadata
                    for (key, value) in result.metadata {
                        merged_result.metadata.insert(key, value);
                    }
                }
                Ok(Err(e)) => {
                    return Err(e);
                }
                Err(e) => {
                    return Err(CodeGraphError::storage_error(format!("Query task failed: {}", e)));
                }
            }
        }

        merged_result.execution_time_ms = max_execution_time;
        Ok(merged_result)
    }

    async fn begin_transaction(&self) -> Result<Box<dyn Transaction>> {
        // For distributed transactions, we'd need a distributed transaction coordinator
        // For now, return a simple transaction that operates on the first shard
        let shards = self.shards.read().await;
        if let Some(shard) = shards.first() {
            shard.primary.begin_transaction().await
        } else {
            Err(CodeGraphError::storage_error("No healthy shards available".to_string()))
        }
    }
}

impl Default for DistributedStorage {
    /// Placeholder `Default`: yields a storage with an *empty* shard list.
    ///
    /// NOTE(review): the hash ring is built for 1 shard while `shards` is
    /// empty, so every key resolves to shard id 0, `shards.get(0)` finds
    /// nothing, and all operations fail with "Shard not found". Use
    /// `DistributedStorage::new(config)` for a working instance.
    fn default() -> Self {
        // This is a placeholder - in practice, you'd need to call new() with proper config
        Self {
            config: DistributedConfig::default(),
            shards: Arc::new(RwLock::new(Vec::new())),
            hash_ring: Arc::new(RwLock::new(ConsistentHashRing::new(1, 150))),
        }
    }
}
