// RocksDB storage implementation for persistent graph storage
use crate::{StorageBackend, Result, StorageError};
use codegraph_core::{NodeId, EdgeId, CodeNode, CodeEdge};
use rocksdb::{DB, Options, ColumnFamily, ColumnFamilyDescriptor, WriteBatch, IteratorMode};
use serde::{Serialize, Deserialize};
use std::path::Path;
use std::sync::Arc;
use std::collections::HashMap;

/// Column family names for different data types
const CF_NODES: &str = "nodes"; // bincode-encoded StoredNode records, keyed "node:<id>"
const CF_EDGES: &str = "edges"; // bincode-encoded StoredEdge records, keyed "edge:<id>"
const CF_METADATA: &str = "metadata"; // reserved; never written in this file (only compacted)
const CF_INDICES: &str = "indices"; // secondary indices: by type/language/name/source/target
const CF_RELATIONSHIPS: &str = "relationships"; // reserved; never written in this file (only compacted)

/// RocksDB storage backend for persistent graph data.
///
/// Holds a single shared database handle; the `Arc` allows the handle to
/// be shared cheaply (e.g. across async tasks) without reopening the DB.
pub struct RocksDbStorage {
    /// RocksDB database instance (shared handle)
    db: Arc<DB>,
    /// Filesystem path the database was opened at; also used as the
    /// restore target in `restore()`
    path: String,
    /// Configuration the database was opened with (kept for introspection;
    /// not re-read after open)
    config: RocksDbConfig,
}

/// Configuration for RocksDB storage.
///
/// Defaults (see `Default` impl) are tuned for a medium-sized local
/// database. All sizes are in bytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RocksDbConfig {
    /// Maximum number of open files (default: 1000)
    pub max_open_files: i32,
    /// Write buffer (memtable) size in bytes (default: 64 MiB)
    pub write_buffer_size: usize,
    /// Maximum number of write buffers kept in memory (default: 3)
    pub max_write_buffer_number: i32,
    /// Target SST file size base in bytes (default: 64 MiB)
    pub target_file_size_base: u64,
    /// Maximum bytes for level base (default: 256 MiB)
    pub max_bytes_for_level_base: u64,
    /// Enable LZ4 compression for stored blocks (default: true)
    pub enable_compression: bool,
    /// Block cache size in bytes (default: 128 MiB)
    pub block_cache_size: usize,
    /// Enable bloom filters on block-based tables (default: true)
    pub enable_bloom_filter: bool,
    /// Bloom filter bits per key (default: 10)
    pub bloom_filter_bits_per_key: i32,
}

impl Default for RocksDbConfig {
    fn default() -> Self {
        Self {
            max_open_files: 1000,
            write_buffer_size: 64 * 1024 * 1024, // 64MB
            max_write_buffer_number: 3,
            target_file_size_base: 64 * 1024 * 1024, // 64MB
            max_bytes_for_level_base: 256 * 1024 * 1024, // 256MB
            enable_compression: true,
            block_cache_size: 128 * 1024 * 1024, // 128MB
            enable_bloom_filter: true,
            bloom_filter_bits_per_key: 10,
        }
    }
}

/// Serializable node data for storage (bincode-encoded into CF_NODES).
///
/// NOTE: bincode encodes struct fields in declaration order — do not
/// reorder fields without migrating existing on-disk data.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct StoredNode {
    id: NodeId,
    /// `Debug` rendering of the node's type (see `node_to_stored`), not a
    /// stable, versioned string
    node_type: String,
    name: String,
    language: String,
    location: StoredLocation,
    /// Currently always empty — extraction is still a TODO in `node_to_stored`
    metadata: HashMap<String, String>,
    /// Currently always empty — extraction is still a TODO in `node_to_stored`
    attributes: HashMap<String, String>,
    /// Unix timestamp (seconds) set when the record was built
    created_at: i64,
    /// Unix timestamp (seconds); currently always equals `created_at`
    modified_at: i64,
}

/// Serializable edge data for storage (bincode-encoded into CF_EDGES).
///
/// NOTE: bincode encodes struct fields in declaration order — do not
/// reorder fields without migrating existing on-disk data.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct StoredEdge {
    id: EdgeId,
    source: NodeId,
    target: NodeId,
    /// `Debug` rendering of the edge's type (see `edge_to_stored`)
    edge_type: String,
    /// Currently always empty — extraction is still a TODO in `edge_to_stored`
    metadata: HashMap<String, String>,
    /// Currently always empty — extraction is still a TODO in `edge_to_stored`
    attributes: HashMap<String, String>,
    /// Currently hardcoded to 1.0 in `edge_to_stored`
    strength: f64,
    /// Currently hardcoded to 1.0 in `edge_to_stored`
    confidence: f64,
    /// Unix timestamp (seconds) set when the record was built
    created_at: i64,
    /// Unix timestamp (seconds); currently always equals `created_at`
    modified_at: i64,
}

/// Serializable source location data.
///
/// Line/column numbering convention is inherited as-is from
/// `CodeNode::location()` (values are only cast to `usize`, never
/// normalized here) — consult `codegraph_core` for 0- vs 1-based rules.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct StoredLocation {
    file_path: String,
    start_line: usize,
    start_column: usize,
    end_line: usize,
    end_column: usize,
}

impl RocksDbStorage {
    /// Create a new RocksDB storage instance at `path` using the default
    /// configuration (see [`RocksDbConfig::default`]).
    pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
        Self::with_config(path, RocksDbConfig::default())
    }

    /// Create a new RocksDB storage instance with custom configuration.
    ///
    /// Opens the database, creating it and any missing column families as
    /// needed. The same tuned `Options` value is cloned into every column
    /// family descriptor, so all five CFs share identical settings.
    pub fn with_config<P: AsRef<Path>>(path: P, config: RocksDbConfig) -> Result<Self> {
        let path_str = path.as_ref().to_string_lossy().to_string();

        // Configure RocksDB options
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.create_missing_column_families(true);
        opts.set_max_open_files(config.max_open_files);
        opts.set_write_buffer_size(config.write_buffer_size);
        opts.set_max_write_buffer_number(config.max_write_buffer_number);
        opts.set_target_file_size_base(config.target_file_size_base);
        opts.set_max_bytes_for_level_base(config.max_bytes_for_level_base);

        if config.enable_compression {
            opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
        }

        // Configure block-based table options (block cache + optional bloom filter)
        let mut block_opts = rocksdb::BlockBasedOptions::default();
        block_opts.set_block_cache(&rocksdb::Cache::new_lru_cache(config.block_cache_size));

        if config.enable_bloom_filter {
            // second argument `false` selects the full (per-file) bloom
            // filter variant rather than the block-based one
            block_opts.set_bloom_filter(config.bloom_filter_bits_per_key, false);
        }

        opts.set_block_based_table_factory(&block_opts);

        // Define column families
        let cfs = vec![
            ColumnFamilyDescriptor::new(CF_NODES, opts.clone()),
            ColumnFamilyDescriptor::new(CF_EDGES, opts.clone()),
            ColumnFamilyDescriptor::new(CF_METADATA, opts.clone()),
            ColumnFamilyDescriptor::new(CF_INDICES, opts.clone()),
            ColumnFamilyDescriptor::new(CF_RELATIONSHIPS, opts.clone()),
        ];

        // Open database
        let db = DB::open_cf_descriptors(&opts, &path_str, cfs)
            .map_err(|e| StorageError::database_error(format!("Failed to open RocksDB: {}", e)))?;

        Ok(Self {
            db: Arc::new(db),
            path: path_str,
            config,
        })
    }

    /// Get a column family handle by name; a missing family is reported as
    /// a `database_error` (should not occur for the five CF_* names, which
    /// are always opened in `with_config`).
    fn get_cf(&self, name: &str) -> Result<&ColumnFamily> {
        self.db.cf_handle(name)
            .ok_or_else(|| StorageError::database_error(format!("Column family '{}' not found", name)))
    }

    /// Serialize data to bytes using bincode.
    fn serialize<T: Serialize>(&self, data: &T) -> Result<Vec<u8>> {
        bincode::serialize(data)
            .map_err(|e| StorageError::serialization_error(format!("Serialization failed: {}", e)).into())
    }

    /// Deserialize bincode bytes back into `T`.
    fn deserialize<T: for<'de> Deserialize<'de>>(&self, bytes: &[u8]) -> Result<T> {
        bincode::deserialize(bytes)
            .map_err(|e| StorageError::serialization_error(format!("Deserialization failed: {}", e)).into())
    }

    /// Convert CodeNode to StoredNode.
    ///
    /// NOTE(review): both `created_at` and `modified_at` are set to "now",
    /// so re-saving an existing node resets its creation timestamp —
    /// confirm whether `created_at` should instead be preserved from the
    /// previously stored record.
    fn node_to_stored(&self, node: &dyn CodeNode) -> StoredNode {
        let now = chrono::Utc::now().timestamp();
        let location = node.location();

        StoredNode {
            id: node.id(),
            // type is persisted via its Debug rendering; renaming an enum
            // variant upstream would silently change stored values
            node_type: format!("{:?}", node.node_type()),
            name: node.name().to_string(),
            language: node.language().to_string(),
            location: StoredLocation {
                file_path: location.file_path.clone(),
                start_line: location.start_line as usize,
                start_column: location.start_column as usize,
                end_line: location.end_line as usize,
                end_column: location.end_column as usize,
            },
            metadata: HashMap::new(), // TODO: Extract from node metadata
            attributes: HashMap::new(), // TODO: Extract from node attributes
            created_at: now,
            modified_at: now,
        }
    }

    /// Convert CodeEdge to StoredEdge.
    ///
    /// NOTE(review): `strength` and `confidence` are hardcoded to 1.0, and
    /// the timestamps share the same "reset on every save" caveat as
    /// `node_to_stored`.
    fn edge_to_stored(&self, edge: &dyn CodeEdge) -> StoredEdge {
        let now = chrono::Utc::now().timestamp();

        StoredEdge {
            id: edge.id(),
            source: edge.source(),
            target: edge.target(),
            edge_type: format!("{:?}", edge.edge_type()),
            metadata: HashMap::new(), // TODO: Extract from edge metadata
            attributes: HashMap::new(), // TODO: Extract from edge attributes
            strength: 1.0, // TODO: Extract from edge
            confidence: 1.0, // TODO: Extract from edge
            created_at: now,
            modified_at: now,
        }
    }

    /// Create database indices for efficient querying.
    ///
    /// This only writes a descriptive metadata record per index under an
    /// `index:<name>` key; the actual index entries are maintained
    /// incrementally by `update_node_indices` / `update_edge_indices` on
    /// every save.
    pub async fn create_indices(&self) -> Result<()> {
        let indices_cf = self.get_cf(CF_INDICES)?;

        // Create indices for common query patterns
        let indices = vec![
            ("nodes_by_type", "Index for nodes by type"),
            ("nodes_by_language", "Index for nodes by language"),
            ("nodes_by_name", "Index for nodes by name"),
            ("edges_by_type", "Index for edges by type"),
            ("edges_by_source", "Index for edges by source node"),
            ("edges_by_target", "Index for edges by target node"),
        ];

        for (index_name, description) in indices {
            let index_info = HashMap::from([
                ("name".to_string(), index_name.to_string()),
                ("description".to_string(), description.to_string()),
                ("created_at".to_string(), chrono::Utc::now().timestamp().to_string()),
            ]);

            let key = format!("index:{}", index_name);
            let value = self.serialize(&index_info)?;

            self.db.put_cf(indices_cf, key.as_bytes(), value)?;
        }

        Ok(())
    }

    /// Update secondary indices for a node.
    ///
    /// NOTE(review): each index update is a non-atomic read-modify-write
    /// of a serialized `Vec<NodeId>`; concurrent saves against `&self`
    /// could interleave and lose entries. `unwrap_or_default()` also maps
    /// genuine DB/deserialization errors (not just missing keys) to an
    /// empty list, silently rebuilding the index entry.
    async fn update_node_indices(&self, node: &StoredNode) -> Result<()> {
        let indices_cf = self.get_cf(CF_INDICES)?;

        // Update type index
        let type_key = format!("nodes_by_type:{}", node.node_type);
        let mut type_nodes: Vec<NodeId> = self.get_index_entries(&type_key).unwrap_or_default();
        if !type_nodes.contains(&node.id) {
            type_nodes.push(node.id);
            let value = self.serialize(&type_nodes)?;
            self.db.put_cf(indices_cf, type_key.as_bytes(), value)?;
        }

        // Update language index
        let lang_key = format!("nodes_by_language:{}", node.language);
        let mut lang_nodes: Vec<NodeId> = self.get_index_entries(&lang_key).unwrap_or_default();
        if !lang_nodes.contains(&node.id) {
            lang_nodes.push(node.id);
            let value = self.serialize(&lang_nodes)?;
            self.db.put_cf(indices_cf, lang_key.as_bytes(), value)?;
        }

        // Update name index (for prefix searches)
        // NOTE(review): this stores a single NodeId per name, so nodes
        // sharing a name overwrite each other — confirm this is intended.
        let name_key = format!("nodes_by_name:{}", node.name);
        let value = self.serialize(&node.id)?;
        self.db.put_cf(indices_cf, name_key.as_bytes(), value)?;

        Ok(())
    }

    /// Update secondary indices for an edge.
    ///
    /// Same non-atomic read-modify-write and error-swallowing caveats as
    /// `update_node_indices`.
    async fn update_edge_indices(&self, edge: &StoredEdge) -> Result<()> {
        let indices_cf = self.get_cf(CF_INDICES)?;

        // Update type index
        let type_key = format!("edges_by_type:{}", edge.edge_type);
        let mut type_edges: Vec<EdgeId> = self.get_index_entries(&type_key).unwrap_or_default();
        if !type_edges.contains(&edge.id) {
            type_edges.push(edge.id);
            let value = self.serialize(&type_edges)?;
            self.db.put_cf(indices_cf, type_key.as_bytes(), value)?;
        }

        // Update source index
        let source_key = format!("edges_by_source:{}", edge.source);
        let mut source_edges: Vec<EdgeId> = self.get_index_entries(&source_key).unwrap_or_default();
        if !source_edges.contains(&edge.id) {
            source_edges.push(edge.id);
            let value = self.serialize(&source_edges)?;
            self.db.put_cf(indices_cf, source_key.as_bytes(), value)?;
        }

        // Update target index
        let target_key = format!("edges_by_target:{}", edge.target);
        let mut target_edges: Vec<EdgeId> = self.get_index_entries(&target_key).unwrap_or_default();
        if !target_edges.contains(&edge.id) {
            target_edges.push(edge.id);
            let value = self.serialize(&target_edges)?;
            self.db.put_cf(indices_cf, target_key.as_bytes(), value)?;
        }

        Ok(())
    }

    /// Get entries from an index by exact key.
    ///
    /// A missing key is reported as a `not_found` error; callers generally
    /// map that (and any other error) to an empty list.
    fn get_index_entries<T: for<'de> Deserialize<'de>>(&self, key: &str) -> Result<T> {
        let indices_cf = self.get_cf(CF_INDICES)?;

        match self.db.get_cf(indices_cf, key.as_bytes())? {
            Some(value) => self.deserialize(&value),
            None => Err(StorageError::not_found(format!("Index key '{}' not found", key)).into()),
        }
    }

    /// Remove node from secondary indices (best-effort; shares the
    /// read-modify-write race described on `update_node_indices`).
    async fn remove_node_from_indices(&self, node: &StoredNode) -> Result<()> {
        let indices_cf = self.get_cf(CF_INDICES)?;

        // Remove from type index (rewrites the remaining id list in place)
        let type_key = format!("nodes_by_type:{}", node.node_type);
        if let Ok(mut type_nodes) = self.get_index_entries::<Vec<NodeId>>(&type_key) {
            type_nodes.retain(|&id| id != node.id);
            let value = self.serialize(&type_nodes)?;
            self.db.put_cf(indices_cf, type_key.as_bytes(), value)?;
        }

        // Remove from language index
        let lang_key = format!("nodes_by_language:{}", node.language);
        if let Ok(mut lang_nodes) = self.get_index_entries::<Vec<NodeId>>(&lang_key) {
            lang_nodes.retain(|&id| id != node.id);
            let value = self.serialize(&lang_nodes)?;
            self.db.put_cf(indices_cf, lang_key.as_bytes(), value)?;
        }

        // Remove from name index (single-id entry, so a plain delete)
        let name_key = format!("nodes_by_name:{}", node.name);
        self.db.delete_cf(indices_cf, name_key.as_bytes())?;

        Ok(())
    }

    /// Remove edge from secondary indices (best-effort; shares the
    /// read-modify-write race described on `update_node_indices`).
    async fn remove_edge_from_indices(&self, edge: &StoredEdge) -> Result<()> {
        let indices_cf = self.get_cf(CF_INDICES)?;

        // Remove from type index
        let type_key = format!("edges_by_type:{}", edge.edge_type);
        if let Ok(mut type_edges) = self.get_index_entries::<Vec<EdgeId>>(&type_key) {
            type_edges.retain(|&id| id != edge.id);
            let value = self.serialize(&type_edges)?;
            self.db.put_cf(indices_cf, type_key.as_bytes(), value)?;
        }

        // Remove from source index
        let source_key = format!("edges_by_source:{}", edge.source);
        if let Ok(mut source_edges) = self.get_index_entries::<Vec<EdgeId>>(&source_key) {
            source_edges.retain(|&id| id != edge.id);
            let value = self.serialize(&source_edges)?;
            self.db.put_cf(indices_cf, source_key.as_bytes(), value)?;
        }

        // Remove from target index
        let target_key = format!("edges_by_target:{}", edge.target);
        if let Ok(mut target_edges) = self.get_index_entries::<Vec<EdgeId>>(&target_key) {
            target_edges.retain(|&id| id != edge.id);
            let value = self.serialize(&target_edges)?;
            self.db.put_cf(indices_cf, target_key.as_bytes(), value)?;
        }

        Ok(())
    }
}

#[async_trait::async_trait]
impl StorageBackend for RocksDbStorage {
    /// Save a node to storage under key `node:<id>`.
    ///
    /// NOTE(review): the node put and the subsequent index puts are
    /// separate writes (no `WriteBatch`, although it is imported), so a
    /// crash in between can leave indices out of sync with node data.
    async fn save_node(&self, node: &dyn CodeNode) -> Result<()> {
        let nodes_cf = self.get_cf(CF_NODES)?;
        let stored_node = self.node_to_stored(node);

        let key = format!("node:{}", node.id());
        let value = self.serialize(&stored_node)?;

        // Save node data
        self.db.put_cf(nodes_cf, key.as_bytes(), value)?;

        // Update indices
        self.update_node_indices(&stored_node).await?;

        Ok(())
    }

    /// Save an edge to storage under key `edge:<id>`.
    ///
    /// Same non-atomic data-then-indices caveat as `save_node`.
    async fn save_edge(&self, edge: &dyn CodeEdge) -> Result<()> {
        let edges_cf = self.get_cf(CF_EDGES)?;
        let stored_edge = self.edge_to_stored(edge);

        let key = format!("edge:{}", edge.id());
        let value = self.serialize(&stored_edge)?;

        // Save edge data
        self.db.put_cf(edges_cf, key.as_bytes(), value)?;

        // Update indices
        self.update_edge_indices(&stored_edge).await?;

        Ok(())
    }

    /// Load a node's raw serialized bytes (bincode `StoredNode`), or
    /// `None` if no such node exists.
    async fn load_node(&self, node_id: NodeId) -> Result<Option<Vec<u8>>> {
        let nodes_cf = self.get_cf(CF_NODES)?;
        let key = format!("node:{}", node_id);

        match self.db.get_cf(nodes_cf, key.as_bytes())? {
            Some(value) => Ok(Some(value)),
            None => Ok(None),
        }
    }

    /// Load an edge's raw serialized bytes (bincode `StoredEdge`), or
    /// `None` if no such edge exists.
    async fn load_edge(&self, edge_id: EdgeId) -> Result<Option<Vec<u8>>> {
        let edges_cf = self.get_cf(CF_EDGES)?;
        let key = format!("edge:{}", edge_id);

        match self.db.get_cf(edges_cf, key.as_bytes())? {
            Some(value) => Ok(Some(value)),
            None => Ok(None),
        }
    }

    /// Delete a node from storage and its index entries.
    ///
    /// Returns `false` if the node did not exist. The stored record is
    /// loaded first because index cleanup needs its type/language/name.
    async fn delete_node(&self, node_id: NodeId) -> Result<bool> {
        let nodes_cf = self.get_cf(CF_NODES)?;
        let key = format!("node:{}", node_id);

        // Load node first to update indices
        if let Some(value) = self.db.get_cf(nodes_cf, key.as_bytes())? {
            let stored_node: StoredNode = self.deserialize(&value)?;

            // Remove from indices
            self.remove_node_from_indices(&stored_node).await?;

            // Delete the node
            self.db.delete_cf(nodes_cf, key.as_bytes())?;

            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Delete an edge from storage and its index entries.
    ///
    /// Returns `false` if the edge did not exist.
    async fn delete_edge(&self, edge_id: EdgeId) -> Result<bool> {
        let edges_cf = self.get_cf(CF_EDGES)?;
        let key = format!("edge:{}", edge_id);

        // Load edge first to update indices
        if let Some(value) = self.db.get_cf(edges_cf, key.as_bytes())? {
            let stored_edge: StoredEdge = self.deserialize(&value)?;

            // Remove from indices
            self.remove_edge_from_indices(&stored_edge).await?;

            // Delete the edge
            self.db.delete_cf(edges_cf, key.as_bytes())?;

            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// List all node IDs by scanning the entire nodes CF (O(n)).
    ///
    /// Keys whose suffix fails to parse as a NodeId are silently skipped.
    async fn list_nodes(&self) -> Result<Vec<NodeId>> {
        let nodes_cf = self.get_cf(CF_NODES)?;
        let mut node_ids = Vec::new();

        let iter = self.db.iterator_cf(nodes_cf, IteratorMode::Start);
        for item in iter {
            let (key, _value) = item?;
            let key_str = String::from_utf8_lossy(&*key);

            if let Some(node_id_str) = key_str.strip_prefix("node:") {
                if let Ok(node_id) = node_id_str.parse() {
                    node_ids.push(node_id);
                }
            }
        }

        Ok(node_ids)
    }

    /// List all edge IDs by scanning the entire edges CF (O(n)).
    ///
    /// Keys whose suffix fails to parse as an EdgeId are silently skipped.
    async fn list_edges(&self) -> Result<Vec<EdgeId>> {
        let edges_cf = self.get_cf(CF_EDGES)?;
        let mut edge_ids = Vec::new();

        let iter = self.db.iterator_cf(edges_cf, IteratorMode::Start);
        for item in iter {
            let (key, _value) = item?;
            let key_str = String::from_utf8_lossy(&*key);

            if let Some(edge_id_str) = key_str.strip_prefix("edge:") {
                if let Ok(edge_id) = edge_id_str.parse() {
                    edge_ids.push(edge_id);
                }
            }
        }

        Ok(edge_ids)
    }

    /// Find nodes by type via the `nodes_by_type` index.
    ///
    /// A missing index entry — or any index read error — yields an empty
    /// result rather than an error.
    async fn find_nodes_by_type(&self, node_type: &str) -> Result<Vec<NodeId>> {
        let key = format!("nodes_by_type:{}", node_type);
        Ok(self.get_index_entries(&key).unwrap_or_else(|_| Vec::new()))
    }

    /// Find edges by type via the `edges_by_type` index (missing/errored
    /// index reads yield an empty result).
    async fn find_edges_by_type(&self, edge_type: &str) -> Result<Vec<EdgeId>> {
        let key = format!("edges_by_type:{}", edge_type);
        Ok(self.get_index_entries(&key).unwrap_or_else(|_| Vec::new()))
    }

    /// Find edges by source node via the `edges_by_source` index
    /// (missing/errored index reads yield an empty result).
    async fn find_edges_by_source(&self, source_id: NodeId) -> Result<Vec<EdgeId>> {
        let key = format!("edges_by_source:{}", source_id);
        Ok(self.get_index_entries(&key).unwrap_or_else(|_| Vec::new()))
    }

    /// Find edges by target node via the `edges_by_target` index
    /// (missing/errored index reads yield an empty result).
    async fn find_edges_by_target(&self, target_id: NodeId) -> Result<Vec<EdgeId>> {
        let key = format!("edges_by_target:{}", target_id);
        Ok(self.get_index_entries(&key).unwrap_or_else(|_| Vec::new()))
    }

    /// Execute a "transaction".
    ///
    /// NOTE(review): no atomicity or rollback is actually provided — the
    /// closure runs directly against the live DB. A real implementation
    /// would need RocksDB's WriteBatch or the transaction DB variant.
    async fn execute_transaction<F, R>(&self, transaction: F) -> Result<R>
    where
        F: FnOnce() -> Result<R> + Send,
        R: Send,
    {
        // RocksDB doesn't have traditional transactions, but we can use WriteBatch
        // For now, just execute the function directly
        transaction()
    }

    /// Get storage statistics: RocksDB properties (stats, level-0 file
    /// count, memtable memory) plus node/edge counts and the DB path.
    ///
    /// NOTE(review): the counts come from `list_nodes`/`list_edges`, i.e.
    /// two full column-family scans — expensive on large databases.
    async fn get_stats(&self) -> Result<HashMap<String, String>> {
        let mut stats = HashMap::new();

        // Get RocksDB statistics
        if let Some(db_stats) = self.db.property_value("rocksdb.stats") {
            stats.insert("rocksdb_stats".to_string(), db_stats);
        }

        if let Some(num_files) = self.db.property_value("rocksdb.num-files-at-level0") {
            stats.insert("num_files_level0".to_string(), num_files);
        }

        if let Some(mem_usage) = self.db.property_value("rocksdb.cur-size-all-mem-tables") {
            stats.insert("memory_usage".to_string(), mem_usage);
        }

        // Count nodes and edges
        let node_count = self.list_nodes().await?.len();
        let edge_count = self.list_edges().await?.len();

        stats.insert("node_count".to_string(), node_count.to_string());
        stats.insert("edge_count".to_string(), edge_count.to_string());
        stats.insert("database_path".to_string(), self.path.clone());

        Ok(stats)
    }

    /// Back up the database to `backup_path` using RocksDB's BackupEngine
    /// (each call appends a new incremental backup).
    async fn backup<P: AsRef<Path> + Send>(&self, backup_path: P) -> Result<()> {
        let backup_path_str = backup_path.as_ref().to_string_lossy().to_string();

        // Create backup engine
        let backup_opts = rocksdb::BackupEngineOptions::default();
        let mut backup_engine = rocksdb::BackupEngine::open(&backup_opts, &backup_path_str)
            .map_err(|e| StorageError::database_error(format!("Failed to create backup engine: {}", e)))?;

        // Create backup
        backup_engine.create_new_backup(&self.db)
            .map_err(|e| StorageError::database_error(format!("Failed to create backup: {}", e)))?;

        Ok(())
    }

    /// Restore from the latest backup at `backup_path`.
    ///
    /// NOTE(review): this restores into the live database directory
    /// (`self.path`) while the DB handle is still open — presumably the
    /// storage must be reopened afterwards for the restored data to be
    /// visible; confirm against the BackupEngine documentation.
    async fn restore<P: AsRef<Path> + Send>(&self, backup_path: P) -> Result<()> {
        let backup_path_str = backup_path.as_ref().to_string_lossy().to_string();

        // Create backup engine
        let backup_opts = rocksdb::BackupEngineOptions::default();
        let mut backup_engine = rocksdb::BackupEngine::open(&backup_opts, &backup_path_str)
            .map_err(|e| StorageError::database_error(format!("Failed to open backup engine: {}", e)))?;

        // Restore from latest backup
        backup_engine.restore_from_latest_backup(&self.path, &self.path, &rocksdb::RestoreOptions::default())
            .map_err(|e| StorageError::database_error(format!("Failed to restore from backup: {}", e)))?;

        Ok(())
    }

    /// Trigger a full-range manual compaction on every column family.
    async fn compact(&self) -> Result<()> {
        // Compact all column families
        let cf_names = [CF_NODES, CF_EDGES, CF_METADATA, CF_INDICES, CF_RELATIONSHIPS];

        for cf_name in &cf_names {
            let cf = self.get_cf(cf_name)?;
            self.db.compact_range_cf(cf, None::<&[u8]>, None::<&[u8]>);
        }

        Ok(())
    }
}

impl Default for RocksDbStorage {
    /// Opens (or creates) a database at the hard-coded path
    /// `./data/rocksdb`.
    ///
    /// NOTE(review): this `Default` impl panics via `expect` if the
    /// database cannot be opened (missing parent directory, held RocksDB
    /// lock, permissions) — callers that cannot tolerate a panic should
    /// use `RocksDbStorage::new` and handle the `Result`.
    fn default() -> Self {
        Self::new("./data/rocksdb").expect("Failed to create default RocksDB storage")
    }
}
