// Real-time file watching and change detection
use crate::{
    CodeChange, GraphUpdateResult, IncrementalParseEngine, IncrementalEngine, Result, TextEdit, Position,
};
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::{mpsc, Mutex};
use tracing::{debug, error, info, warn};
use dashmap::DashMap;
use serde::{Serialize, Deserialize};
use uuid::Uuid;

/// Real-time file watcher for code changes
/// Real-time file watcher for code changes
///
/// Wraps a `notify` file-system watcher, debounces rapid per-file events,
/// and feeds the resulting changes into the incremental parse engine.
/// Mutable state is shared behind `Arc`s so background tasks can observe
/// and pause the watcher concurrently.
pub struct FileWatcher {
    /// File system watcher (`None` until `start_watching` succeeds)
    watcher: Option<RecommendedWatcher>,
    /// Channel for receiving file events from the notify callback
    event_receiver: Option<mpsc::UnboundedReceiver<notify::Result<Event>>>,
    /// Incremental parse engine that receives converted code changes
    parse_engine: Arc<IncrementalParseEngine>,
    /// Configuration
    config: WatcherConfig,
    /// Tracked files and their metadata (using DashMap for concurrent access)
    tracked_files: Arc<DashMap<PathBuf, FileMetadata>>,
    /// Language extensions mapping (".rs" -> "rust", ...)
    language_extensions: HashMap<String, String>,
    /// Watcher lifecycle state (Stopped/Starting/Running/Paused/Stopping)
    state: Arc<Mutex<WatcherState>>,
    /// Event debouncer for batching rapid changes
    debouncer: Arc<Mutex<EventDebouncer>>,
    /// Performance metrics
    metrics: Arc<Mutex<WatcherMetrics>>,
    /// Watched paths for state persistence
    watched_paths: Arc<Mutex<Vec<PathBuf>>>,
    /// Session ID for recovery (fresh UUID per watcher instance)
    session_id: String,
    /// Memory management configuration
    memory_config: MemoryConfig,
    /// Last memory check time
    last_memory_check: Arc<Mutex<SystemTime>>,
}

/// Watcher state management
/// Watcher state management
///
/// Lifecycle: `Stopped -> Starting -> Running <-> Paused -> Stopping -> Stopped`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum WatcherState {
    /// Not watching; the only state from which a start is accepted
    Stopped,
    /// Transitioning into `Running` inside `start_watching`
    Starting,
    /// Actively watching and processing events
    Running,
    /// Watch is alive but `process_events` is a no-op
    Paused,
    /// Transitioning into `Stopped` inside `stop_watching`
    Stopping,
}

/// Persistent state for the file watcher
/// Persistent state for the file watcher
///
/// Serialized snapshot used to restore a watcher session after a restart.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WatcherPersistentState {
    /// Current watcher state
    pub state: WatcherState,
    /// Watched directories
    pub watched_paths: Vec<PathBuf>,
    /// Configuration snapshot
    pub config: WatcherConfig,
    /// File metadata snapshot
    pub file_metadata: HashMap<PathBuf, FileMetadata>,
    /// Last save timestamp
    pub last_saved: SystemTime,
    /// Session ID for recovery
    pub session_id: String,
}

/// Event debouncer for handling rapid file changes
/// Event debouncer for handling rapid file changes
///
/// Keeps only the newest event per file path; events become "ready" once the
/// configured debounce delay has elapsed since their arrival.
#[derive(Debug)]
struct EventDebouncer {
    /// Pending events grouped by file path (newest event wins per path)
    pending_events: HashMap<PathBuf, (Event, Instant)>,
    /// Last flush time (updated only by `flush_all`)
    last_flush: Instant,
}

/// Performance metrics for the watcher
/// Performance metrics for the watcher
///
/// Updated under the `metrics` mutex inside `process_events` and the
/// memory-cleanup paths.
#[derive(Debug)]
struct WatcherMetrics {
    /// Total events processed
    total_events: u64,
    /// Total files processed
    total_files: u64,
    /// Average processing time per file (ms); running total-time / total-files
    avg_processing_time_ms: f64,
    /// Peak memory usage (bytes)
    peak_memory_usage: u64,
    /// Current memory usage (bytes)
    current_memory_usage: u64,
    /// Number of batch operations
    batch_operations: u64,
    /// Number of memory cleanups performed
    memory_cleanups: u64,
    /// Last memory cleanup time
    last_cleanup_time: SystemTime,
}

impl Default for WatcherMetrics {
    fn default() -> Self {
        Self {
            total_events: 0,
            total_files: 0,
            avg_processing_time_ms: 0.0,
            peak_memory_usage: 0,
            current_memory_usage: 0,
            batch_operations: 0,
            memory_cleanups: 0,
            last_cleanup_time: SystemTime::now(),
        }
    }
}

/// Memory management configuration
/// Memory management configuration
///
/// Controls when and how aggressively cached watcher data is evicted.
#[derive(Debug, Clone)]
pub struct MemoryConfig {
    /// Maximum memory usage before triggering cleanup (bytes)
    max_memory_usage: u64,
    /// Cleanup interval in seconds
    cleanup_interval_seconds: u64,
    /// Maximum age for cached entries (seconds)
    max_cache_age_seconds: u64,
    /// Enable aggressive memory management
    aggressive_cleanup: bool,
    /// Memory pressure threshold (0.0 to 1.0, fraction of `max_memory_usage`)
    memory_pressure_threshold: f64,
}

impl Default for MemoryConfig {
    fn default() -> Self {
        Self {
            max_memory_usage: 512 * 1024 * 1024, // 512MB
            cleanup_interval_seconds: 300, // 5 minutes
            max_cache_age_seconds: 1800, // 30 minutes
            aggressive_cleanup: false,
            memory_pressure_threshold: 0.8, // 80%
        }
    }
}

/// Configuration for the file watcher
/// Configuration for the file watcher
///
/// Serializable so it can be persisted as part of `WatcherPersistentState`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WatcherConfig {
    /// Debounce delay in milliseconds (optimized for <50ms detection)
    pub debounce_delay_ms: u64,
    /// Maximum number of events to process per batch
    pub max_batch_size: usize,
    /// File extensions to watch (with leading dot, e.g. ".rs")
    pub watched_extensions: Vec<String>,
    /// Directories to ignore
    pub ignored_directories: Vec<String>,
    /// Whether to watch subdirectories recursively
    pub recursive: bool,
    /// Whether to process initial scan
    pub process_initial_scan: bool,
    /// Maximum processing time per batch (ms) to ensure responsiveness
    pub max_batch_processing_time_ms: u64,
    /// Enable parallel processing of file changes
    pub enable_parallel_processing: bool,
    /// Maximum number of concurrent file processors
    pub max_concurrent_processors: usize,
    /// Memory usage threshold for triggering cleanup (bytes)
    pub memory_cleanup_threshold: u64,
    /// Enable conflict detection for concurrent updates
    pub enable_conflict_detection: bool,
}

impl Default for WatcherConfig {
    fn default() -> Self {
        Self {
            // Optimized for <50ms detection delay
            debounce_delay_ms: 25,
            max_batch_size: 100,
            watched_extensions: vec![
                ".rs".to_string(),
                ".py".to_string(),
                ".js".to_string(),
                ".ts".to_string(),
                ".java".to_string(),
                ".cpp".to_string(),
                ".c".to_string(),
                ".go".to_string(),
                ".jsx".to_string(),
                ".tsx".to_string(),
                ".vue".to_string(),
                ".php".to_string(),
                ".rb".to_string(),
                ".swift".to_string(),
                ".kt".to_string(),
                ".scala".to_string(),
            ],
            ignored_directories: vec![
                ".git".to_string(),
                "node_modules".to_string(),
                "target".to_string(),
                "__pycache__".to_string(),
                ".vscode".to_string(),
                ".idea".to_string(),
                "build".to_string(),
                "dist".to_string(),
                ".next".to_string(),
                ".nuxt".to_string(),
                "coverage".to_string(),
            ],
            recursive: true,
            process_initial_scan: true,
            // Performance optimizations
            max_batch_processing_time_ms: 400, // Leave 100ms buffer for <500ms requirement
            enable_parallel_processing: true,
            max_concurrent_processors: num_cpus::get().max(4),
            memory_cleanup_threshold: 512 * 1024 * 1024, // 512MB
            enable_conflict_detection: true,
        }
    }
}

/// Metadata for tracked files
/// Metadata for tracked files
///
/// One entry per watched file, stored in the `tracked_files` DashMap and
/// included in the persisted state snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileMetadata {
    /// Last modification time
    last_modified: SystemTime,
    /// File size in bytes
    size: u64,
    /// Content hash (for change detection; `None` until first read)
    content_hash: Option<String>,
    /// Programming language
    language: String,
    /// Whether the file is currently being processed (in-flight marker
    /// used by conflict detection)
    processing: bool,
    /// Number of times this file has been processed
    process_count: u64,
    /// Last processing time in milliseconds
    last_processing_time_ms: u64,
    /// Conflict detection version, bumped on every update
    version: u64,
}

impl EventDebouncer {
    /// Create an empty debouncer.
    fn new() -> Self {
        Self {
            pending_events: HashMap::new(),
            last_flush: Instant::now(),
        }
    }

    /// Add an event, replacing any pending event for the same path so only
    /// the latest change per file is processed after the debounce delay.
    fn add_event(&mut self, event: Event) {
        if let Some(path) = event.paths.first() {
            self.pending_events.insert(path.clone(), (event, Instant::now()));
        }
    }

    /// Get events that are ready to be processed (after debounce delay).
    ///
    /// Single-pass `retain`: expired entries are moved out, fresh entries
    /// stay pending (replaces the old collect-then-remove double pass).
    fn get_ready_events(&mut self, debounce_delay: Duration) -> Vec<Event> {
        let now = Instant::now();
        let mut ready_events = Vec::new();

        self.pending_events.retain(|_, (event, timestamp)| {
            if now.duration_since(*timestamp) >= debounce_delay {
                ready_events.push(event.clone());
                false
            } else {
                true
            }
        });

        ready_events
    }

    /// Force flush all pending events.
    ///
    /// `drain` moves the events out instead of cloning every one.
    fn flush_all(&mut self) -> Vec<Event> {
        self.last_flush = Instant::now();
        self.pending_events
            .drain()
            .map(|(_, (event, _))| event)
            .collect()
    }
}

/// File change event with processed information
/// File change event with processed information
///
/// Produced by `process_file_event` from a raw notify event; consumed by
/// `process_file_change`.
#[derive(Debug, Clone)]
pub struct FileChangeEvent {
    /// Path of the changed file
    pub file_path: PathBuf,
    /// Type of change
    pub change_type: FileChangeType,
    /// Programming language
    pub language: String,
    /// Timestamp of the change
    pub timestamp: SystemTime,
    /// File content (None for deletions or when the read failed)
    pub content: Option<String>,
}

/// Type of file change
/// Type of file change
#[derive(Debug, Clone, PartialEq)]
pub enum FileChangeType {
    Created,
    Modified,
    Deleted,
    /// Rename with both endpoints known
    Renamed { from: PathBuf, to: PathBuf },
}

/// Result of processing file changes
/// Result of processing file changes
///
/// Summary of one `process_events` batch.
#[derive(Debug, Clone)]
pub struct WatchResult {
    /// Number of files processed
    pub files_processed: usize,
    /// Number of successful updates
    pub successful_updates: usize,
    /// Number of failed updates
    pub failed_updates: usize,
    /// Total processing time
    pub processing_time_ms: u64,
    /// Individual file results
    pub file_results: Vec<FileProcessResult>,
}

/// Result of processing a single file
/// Result of processing a single file
#[derive(Debug, Clone)]
pub struct FileProcessResult {
    pub file_path: PathBuf,
    /// True when the incremental engine accepted the change
    pub success: bool,
    /// Error message when `success` is false
    pub error: Option<String>,
    /// Graph update details when the engine produced one
    pub graph_update: Option<GraphUpdateResult>,
    pub processing_time_ms: u64,
}

impl FileWatcher {
    /// Create a new file watcher
    /// Create a new file watcher with the default configuration.
    ///
    /// Delegates to `with_config` so the field initialization lives in
    /// exactly one place instead of being duplicated here.
    pub fn new(parse_engine: Arc<IncrementalParseEngine>) -> Self {
        Self::with_config(parse_engine, WatcherConfig::default())
    }

    /// Create a new file watcher with custom configuration
    /// Create a new file watcher with a custom configuration.
    ///
    /// The watcher starts in the `Stopped` state; call `start_watching`
    /// to begin monitoring a directory.
    pub fn with_config(
        parse_engine: Arc<IncrementalParseEngine>,
        config: WatcherConfig,
    ) -> Self {
        // Each watcher instance gets a fresh recovery session id.
        let session_id = Uuid::new_v4().to_string();
        let now = SystemTime::now();

        Self {
            watcher: None,
            event_receiver: None,
            parse_engine,
            config,
            tracked_files: Arc::new(DashMap::new()),
            language_extensions: Self::create_language_extensions_map(),
            state: Arc::new(Mutex::new(WatcherState::Stopped)),
            debouncer: Arc::new(Mutex::new(EventDebouncer::new())),
            metrics: Arc::new(Mutex::new(WatcherMetrics::default())),
            watched_paths: Arc::new(Mutex::new(Vec::new())),
            session_id,
            memory_config: MemoryConfig::default(),
            last_memory_check: Arc::new(Mutex::new(now)),
        }
    }

    /// Start watching a directory
    /// Start watching a directory.
    ///
    /// Transitions the watcher from `Stopped` to `Running`. On any failure
    /// while setting up the OS watch, the watcher is rolled back to
    /// `Stopped` — previously a failure left it stuck in `Starting`, which
    /// made every subsequent `start_watching` call fail with "already
    /// running or in transition".
    pub async fn start_watching(&mut self, path: &Path) -> Result<()> {
        info!("Starting file watcher for path: {:?}", path);

        // Update state to starting (rejects concurrent/duplicate starts)
        {
            let mut state = self.state.lock().await;
            if *state != WatcherState::Stopped {
                return Err(crate::CodeGraphError::internal_error(
                    "Watcher is already running or in transition".to_string(),
                ));
            }
            *state = WatcherState::Starting;
        }

        // Unbounded channel so the notify callback never blocks.
        let (tx, rx) = mpsc::unbounded_channel();
        self.event_receiver = Some(rx);

        // Short poll interval speeds up detection when the backend falls
        // back to polling.
        let mut config = Config::default();
        config = config.with_poll_interval(Duration::from_millis(10));

        let watcher = RecommendedWatcher::new(
            move |res| {
                if let Err(e) = tx.send(res) {
                    error!("Failed to send file event: {}", e);
                }
            },
            config,
        );
        let mut watcher = match watcher {
            Ok(w) => w,
            Err(e) => {
                self.rollback_failed_start().await;
                return Err(crate::CodeGraphError::internal_error(format!(
                    "Failed to create watcher: {}",
                    e
                )));
            }
        };

        // Start watching
        let mode = if self.config.recursive {
            RecursiveMode::Recursive
        } else {
            RecursiveMode::NonRecursive
        };

        if let Err(e) = watcher.watch(path, mode) {
            self.rollback_failed_start().await;
            return Err(crate::CodeGraphError::internal_error(format!(
                "Failed to start watching: {}",
                e
            )));
        }

        self.watcher = Some(watcher);

        // Update state to running
        {
            let mut state = self.state.lock().await;
            *state = WatcherState::Running;
        }

        // Track watched path for persistence/recovery
        {
            let mut watched_paths = self.watched_paths.lock().await;
            watched_paths.push(path.to_path_buf());
        }

        // Perform initial scan if configured
        if self.config.process_initial_scan {
            self.perform_initial_scan(path).await?;
        }

        // Save state after successful start
        self.save_state().await?;

        info!("File watcher started successfully");
        Ok(())
    }

    /// Roll back a failed start: drop the pending event receiver and return
    /// the watcher to `Stopped` so it can be started again.
    async fn rollback_failed_start(&mut self) {
        self.event_receiver = None;
        *self.state.lock().await = WatcherState::Stopped;
    }

    /// Stop watching
    pub async fn stop_watching(&mut self) -> Result<()> {
        info!("Stopping file watcher");

        // Update state to stopping
        {
            let mut state = self.state.lock().await;
            *state = WatcherState::Stopping;
        }

        // Save state before stopping
        self.save_state().await?;

        if let Some(_watcher) = self.watcher.take() {
            // The watcher will be dropped, automatically stopping the watching
        }

        self.event_receiver = None;
        self.tracked_files.clear();

        // Clear watched paths
        {
            let mut watched_paths = self.watched_paths.lock().await;
            watched_paths.clear();
        }

        // Update state to stopped
        {
            let mut state = self.state.lock().await;
            *state = WatcherState::Stopped;
        }

        // Clean up state files
        self.cleanup_state_files().await?;

        info!("File watcher stopped");
        Ok(())
    }

    /// Pause file monitoring
    /// Pause file monitoring without tearing down the OS watch.
    ///
    /// Only valid from the `Running` state; any other state is an error.
    pub async fn pause_monitoring(&self) -> Result<()> {
        let mut state = self.state.lock().await;
        if *state != WatcherState::Running {
            return Err(crate::CodeGraphError::internal_error(
                "Cannot pause: watcher is not running".to_string(),
            ));
        }
        *state = WatcherState::Paused;
        info!("File monitoring paused");
        Ok(())
    }

    /// Resume file monitoring
    /// Resume file monitoring after a pause.
    ///
    /// Only valid from the `Paused` state; any other state is an error.
    pub async fn resume_monitoring(&self) -> Result<()> {
        let mut state = self.state.lock().await;
        if *state != WatcherState::Paused {
            return Err(crate::CodeGraphError::internal_error(
                "Cannot resume: watcher is not paused".to_string(),
            ));
        }
        *state = WatcherState::Running;
        info!("File monitoring resumed");
        Ok(())
    }

    /// Get current watcher state
    /// Snapshot of the current watcher lifecycle state.
    pub async fn get_state(&self) -> WatcherState {
        let guard = self.state.lock().await;
        guard.clone()
    }

    /// Process file events (main event loop)
    /// Process file events (main event loop).
    ///
    /// Drains the raw event channel (bounded by `max_batch_size`), runs the
    /// events through the debouncer, and processes the ready set — in
    /// parallel when configured. Returns a summary of the batch. While
    /// paused this is a no-op; events keep queueing in the channel.
    ///
    /// Fix: `total_events` now counts raw events received, not processed
    /// files — previously it was incremented by `files_processed`, making it
    /// always identical to `total_files`.
    pub async fn process_events(&mut self) -> Result<WatchResult> {
        let start_time = Instant::now();
        let mut file_results = Vec::new();
        let mut files_processed = 0;
        let mut successful_updates = 0;
        let mut failed_updates = 0;

        // Check if monitoring is paused
        {
            let state = self.state.lock().await;
            if *state == WatcherState::Paused {
                return Ok(WatchResult {
                    files_processed: 0,
                    successful_updates: 0,
                    failed_updates: 0,
                    processing_time_ms: 0,
                    file_results: Vec::new(),
                });
            }
        }

        if let Some(receiver) = &mut self.event_receiver {
            let mut raw_events = Vec::new();

            // Collect raw events quickly, bounded by the batch size
            while let Ok(event_result) = receiver.try_recv() {
                match event_result {
                    Ok(event) => {
                        raw_events.push(event);
                        if raw_events.len() >= self.config.max_batch_size {
                            break;
                        }
                    }
                    Err(e) => {
                        warn!("File watcher error: {}", e);
                    }
                }
            }

            // Capture the raw count before the events are moved into the
            // debouncer; this is what `total_events` should track.
            let raw_event_count = raw_events.len() as u64;

            // Add events to debouncer
            {
                let mut debouncer = self.debouncer.lock().await;
                for event in raw_events {
                    debouncer.add_event(event);
                }
            }

            // Take events whose debounce window has elapsed
            let debounced_events = {
                let mut debouncer = self.debouncer.lock().await;
                debouncer.get_ready_events(Duration::from_millis(self.config.debounce_delay_ms))
            };

            // Process events with parallel processing if enabled
            if self.config.enable_parallel_processing && debounced_events.len() > 1 {
                file_results = self.process_events_parallel(debounced_events).await?;
            } else {
                file_results = self.process_events_sequential(debounced_events).await?;
            }

            // Update statistics
            files_processed = file_results.len();
            successful_updates = file_results.iter().filter(|r| r.success).count();
            failed_updates = files_processed - successful_updates;

            // Update metrics; the running average is total batch time spread
            // over the total number of files processed so far.
            {
                let mut metrics = self.metrics.lock().await;
                metrics.total_events += raw_event_count;
                metrics.total_files += files_processed as u64;
                metrics.batch_operations += 1;

                let processing_time_ms = start_time.elapsed().as_millis() as f64;
                if files_processed > 0 {
                    metrics.avg_processing_time_ms =
                        (metrics.avg_processing_time_ms * (metrics.total_files - files_processed as u64) as f64 + processing_time_ms)
                        / metrics.total_files as f64;
                }
            }
        }

        let processing_time = start_time.elapsed().as_millis() as u64;

        // Opportunistic memory management; failures are logged, not fatal
        if let Err(e) = self.check_and_cleanup_memory().await {
            warn!("Memory cleanup failed: {}", e);
        }

        Ok(WatchResult {
            files_processed,
            successful_updates,
            failed_updates,
            processing_time_ms: processing_time,
            file_results,
        })
    }

    /// Process events in parallel for better performance
    async fn process_events_parallel(&self, events: Vec<Event>) -> Result<Vec<FileProcessResult>> {
        use futures::future::join_all;

        let mut tasks = Vec::new();
        let semaphore = Arc::new(tokio::sync::Semaphore::new(self.config.max_concurrent_processors));

        for event in events {
            let semaphore = semaphore.clone();
            let task = async move {
                let _permit = semaphore.acquire().await.unwrap();
                self.process_single_event(event).await
            };
            tasks.push(task);
        }

        let results = join_all(tasks).await;
        let mut file_results = Vec::new();

        for result in results {
            if let Ok(Some(file_result)) = result {
                file_results.push(file_result);
            }
        }

        Ok(file_results)
    }

    /// Process events sequentially
    async fn process_events_sequential(&self, events: Vec<Event>) -> Result<Vec<FileProcessResult>> {
        let mut file_results = Vec::new();

        for event in events {
            if let Ok(Some(file_result)) = self.process_single_event(event).await {
                file_results.push(file_result);
            }
        }

        Ok(file_results)
    }

    /// Process a single event and return the result
    async fn process_single_event(&self, event: Event) -> Result<Option<FileProcessResult>> {
        let start_time = Instant::now();

        if let Some(file_event) = self.process_file_event(event).await? {
            let result = self.process_file_change(&file_event).await;
            let processing_time_ms = start_time.elapsed().as_millis() as u64;

            match result {
                Ok(graph_update) => {
                    Ok(Some(FileProcessResult {
                        file_path: file_event.file_path.clone(),
                        success: true,
                        error: None,
                        graph_update: Some(graph_update),
                        processing_time_ms,
                    }))
                }
                Err(e) => {
                    Ok(Some(FileProcessResult {
                        file_path: file_event.file_path.clone(),
                        success: false,
                        error: Some(e.to_string()),
                        graph_update: None,
                        processing_time_ms,
                    }))
                }
            }
        } else {
            Ok(None)
        }
    }

    /// Perform initial scan of the directory
    async fn perform_initial_scan(&self, path: &Path) -> Result<()> {
        info!("Performing initial scan of: {:?}", path);

        // TODO: Implement recursive directory scanning
        // For now, just log that we would scan
        debug!("Initial scan completed for: {:?}", path);

        Ok(())
    }

    /// Process a single file system event
    async fn process_file_event(&self, event: Event) -> Result<Option<FileChangeEvent>> {
        debug!("Processing file event: {:?}", event);

        match event.kind {
            EventKind::Create(_) => {
                if let Some(path) = event.paths.first() {
                    if self.should_process_file(path) {
                        return Ok(Some(FileChangeEvent {
                            file_path: path.clone(),
                            change_type: FileChangeType::Created,
                            language: self.detect_language(path),
                            timestamp: SystemTime::now(),
                            content: self.read_file_content(path).await.ok(),
                        }));
                    }
                }
            }
            EventKind::Modify(_) => {
                if let Some(path) = event.paths.first() {
                    if self.should_process_file(path) {
                        return Ok(Some(FileChangeEvent {
                            file_path: path.clone(),
                            change_type: FileChangeType::Modified,
                            language: self.detect_language(path),
                            timestamp: SystemTime::now(),
                            content: self.read_file_content(path).await.ok(),
                        }));
                    }
                }
            }
            EventKind::Remove(_) => {
                if let Some(path) = event.paths.first() {
                    if self.should_process_file(path) {
                        return Ok(Some(FileChangeEvent {
                            file_path: path.clone(),
                            change_type: FileChangeType::Deleted,
                            language: self.detect_language(path),
                            timestamp: SystemTime::now(),
                            content: None,
                        }));
                    }
                }
            }
            _ => {
                debug!("Ignoring event kind: {:?}", event.kind);
            }
        }

        Ok(None)
    }

    /// Process a file change event
    async fn process_file_change(&self, event: &FileChangeEvent) -> Result<GraphUpdateResult> {
        debug!("Processing file change: {:?}", event.file_path);

        // Enhanced conflict detection if enabled
        if self.config.enable_conflict_detection {
            // Check for existing processing
            if let Some(mut metadata) = self.tracked_files.get_mut(&event.file_path) {
                if metadata.processing {
                    warn!("File {} is already being processed, applying conflict resolution", event.file_path.display());

                    // Use conflict detection from incremental engine
                    let file_path_str = event.file_path.to_string_lossy();
                    let conflicts = self.parse_engine.detect_update_conflicts(&file_path_str, metadata.version + 1).await?;

                    if !conflicts.is_empty() {
                        let resolution = self.parse_engine.resolve_conflicts(conflicts).await?;

                        // Check if conflicts were resolved successfully
                        if !resolution.failed_conflicts.is_empty() {
                            warn!("Failed to resolve {} conflicts for file {}",
                                  resolution.failed_conflicts.len(), event.file_path.display());
                            return Ok(GraphUpdateResult::default());
                        }

                        info!("Resolved {} conflicts for file {} in {}ms",
                              resolution.resolved_conflicts.len(),
                              event.file_path.display(),
                              resolution.resolution_time_ms);
                    }
                }
                metadata.processing = true;
                metadata.version += 1;
            }
        }

        let result = match &event.change_type {
            FileChangeType::Created | FileChangeType::Modified => {
                if let Some(content) = &event.content {
                    // Update file metadata
                    let metadata = FileMetadata {
                        last_modified: event.timestamp,
                        size: content.len() as u64,
                        content_hash: Some(self.calculate_content_hash(content)),
                        language: event.language.clone(),
                        processing: true,
                        process_count: self.tracked_files.get(&event.file_path)
                            .map(|m| m.process_count + 1)
                            .unwrap_or(1),
                        last_processing_time_ms: 0, // Will be updated after processing
                        version: self.tracked_files.get(&event.file_path)
                            .map(|m| m.version + 1)
                            .unwrap_or(1),
                    };
                    self.tracked_files.insert(event.file_path.clone(), metadata);

                    // Create a CodeChange for the incremental parser
                    let code_change = CodeChange {
                        file_path: event.file_path.to_string_lossy().to_string(),
                        language: event.language.clone(),
                        edit: TextEdit {
                            start_byte: 0,
                            old_end_byte: 0,
                            new_end_byte: content.len() as u32,
                            start_position: Position::new(0, 0),
                            old_end_position: Position::new(0, 0),
                            new_end_position: Position::new(0, 0), // TODO: Calculate actual position
                            new_text: content.clone(),
                        },
                        timestamp: chrono::DateTime::from(event.timestamp),
                    };

                    // Process the change through the incremental engine
                    self.parse_engine.update_graph(&code_change).await
                } else {
                    // File content not available
                    Ok(GraphUpdateResult::default())
                }
            }
            FileChangeType::Deleted => {
                // Remove from tracked files
                self.tracked_files.remove(&event.file_path);
                // TODO: Handle file deletion (remove nodes from graph)
                Ok(GraphUpdateResult::default())
            }
            FileChangeType::Renamed { from, to } => {
                // Update tracked files mapping
                if let Some((_, metadata)) = self.tracked_files.remove(from) {
                    self.tracked_files.insert(to.clone(), metadata);
                }
                // TODO: Handle file renaming in graph
                Ok(GraphUpdateResult::default())
            }
        };

        // Mark processing as complete
        if self.config.enable_conflict_detection {
            if let Some(mut metadata) = self.tracked_files.get_mut(&event.file_path) {
                metadata.processing = false;
            }
        }

        result
    }

    /// Calculate content hash for change detection
    fn calculate_content_hash(&self, content: &str) -> String {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        content.hash(&mut hasher);
        format!("{:x}", hasher.finish())
    }

    /// Check if a file should be processed
    fn should_process_file(&self, path: &Path) -> bool {
        // Check if it's a file (not directory)
        if !path.is_file() {
            return false;
        }

        // Check extension
        if let Some(extension) = path.extension() {
            let ext_str = format!(".{}", extension.to_string_lossy());
            if !self.config.watched_extensions.contains(&ext_str) {
                return false;
            }
        } else {
            return false;
        }

        // Check if in ignored directory
        for ignored in &self.config.ignored_directories {
            if path.to_string_lossy().contains(ignored) {
                return false;
            }
        }

        true
    }

    /// Detect programming language from file extension
    fn detect_language(&self, path: &Path) -> String {
        if let Some(extension) = path.extension() {
            let ext_str = format!(".{}", extension.to_string_lossy());
            self.language_extensions
                .get(&ext_str)
                .cloned()
                .unwrap_or_else(|| "unknown".to_string())
        } else {
            "unknown".to_string()
        }
    }

    /// Read file content.
    ///
    /// Uses the async filesystem API so the watcher's event loop is not
    /// blocked on disk I/O; failures surface as internal errors with the
    /// underlying cause attached.
    async fn read_file_content(&self, path: &Path) -> Result<String> {
        match tokio::fs::read_to_string(path).await {
            Ok(content) => Ok(content),
            Err(e) => Err(crate::CodeGraphError::internal_error(format!(
                "Failed to read file: {}",
                e
            ))),
        }
    }

    /// Create language extensions mapping.
    ///
    /// Static table of (dotted extension, language name) pairs, collected
    /// into a `HashMap` so `detect_language` lookups are O(1).
    fn create_language_extensions_map() -> HashMap<String, String> {
        const EXTENSIONS: [(&str, &str); 11] = [
            (".rs", "rust"),
            (".py", "python"),
            (".pyw", "python"),
            (".js", "javascript"),
            (".ts", "typescript"),
            (".java", "java"),
            (".cpp", "cpp"),
            (".cxx", "cpp"),
            (".cc", "cpp"),
            (".c", "c"),
            (".go", "go"),
        ];

        EXTENSIONS
            .iter()
            .map(|(ext, lang)| (ext.to_string(), lang.to_string()))
            .collect()
    }

    /// Get statistics about the watcher.
    ///
    /// Holds both guards (metrics first, then state — same acquisition
    /// order as the original code) while the snapshot is assembled so the
    /// metric fields and the state value come from one consistent moment.
    pub async fn get_stats(&self) -> WatcherStats {
        let metrics_guard = self.metrics.lock().await;
        let state_guard = self.state.lock().await;

        WatcherStats {
            tracked_files_count: self.tracked_files.len(),
            is_watching: self.watcher.is_some(),
            state: (*state_guard).clone(),
            total_events: metrics_guard.total_events,
            total_files: metrics_guard.total_files,
            avg_processing_time_ms: metrics_guard.avg_processing_time_ms,
            peak_memory_usage: metrics_guard.peak_memory_usage,
            batch_operations: metrics_guard.batch_operations,
        }
    }

    /// Force memory cleanup with comprehensive optimization
    ///
    /// Four-phase pass: (1) evict tracked-file entries older than
    /// `max_cache_age_seconds` that are not mid-processing, (2) drop
    /// debouncer events pending for more than 60 seconds, (3) delegate
    /// cache trimming to the incremental parse engine, and (4) a
    /// placeholder hook for aggressive mode. Records cleanup metrics and
    /// logs a before/after summary based on `estimate_memory_usage`.
    pub async fn cleanup_memory(&self) -> Result<()> {
        let start_time = Instant::now();
        let initial_memory = self.estimate_memory_usage().await;

        info!("Starting memory cleanup - current usage: {} bytes", initial_memory);

        // Record the cleanup in metrics; scoped so the metrics lock is
        // released before the (potentially slow) eviction work below.
        {
            let mut metrics = self.metrics.lock().await;
            metrics.last_cleanup_time = SystemTime::now();
            metrics.memory_cleanups += 1;
        }

        let mut total_removed = 0;

        // Phase 1: evict tracked files whose last modification predates the
        // cutoff. Entries flagged `processing` are kept so an in-flight
        // update never loses its metadata.
        let file_cutoff_time = SystemTime::now() - Duration::from_secs(self.memory_config.max_cache_age_seconds);
        let mut file_removed_count = 0;

        self.tracked_files.retain(|_, metadata| {
            if metadata.last_modified < file_cutoff_time && !metadata.processing {
                file_removed_count += 1;
                false
            } else {
                true
            }
        });
        total_removed += file_removed_count;

        // Phase 2: drop debounced events that have been pending for over a
        // minute — their change has either been processed or superseded.
        {
            let mut debouncer = self.debouncer.lock().await;
            let old_count = debouncer.pending_events.len();
            debouncer.pending_events.retain(|_, (_, timestamp)| {
                timestamp.elapsed() < Duration::from_secs(60)
            });
            let removed = old_count - debouncer.pending_events.len();
            total_removed += removed;
            debug!("Cleaned up {} old debouncer events", removed);
        }

        // Phase 3: let the incremental engine trim its own caches; a failure
        // here is logged but does not abort the cleanup.
        if let Err(e) = self.parse_engine.cleanup_cache().await {
            warn!("Failed to cleanup incremental engine cache: {}", e);
        }

        // Phase 4: aggressive mode is currently a no-op placeholder — Rust
        // has no forced GC, so this exists for future targeted deallocation.
        if self.memory_config.aggressive_cleanup {
            // In Rust, we can't force GC, but we can drop large allocations
            // This is more of a placeholder for potential future optimizations
            debug!("Aggressive cleanup mode enabled");
        }

        let final_memory = self.estimate_memory_usage().await;
        let memory_saved = initial_memory.saturating_sub(final_memory);
        let cleanup_time = start_time.elapsed();

        // Publish the post-cleanup usage; the peak watermark only moves up.
        {
            let mut metrics = self.metrics.lock().await;
            metrics.current_memory_usage = final_memory;
            if final_memory > metrics.peak_memory_usage {
                metrics.peak_memory_usage = final_memory;
            }
        }

        info!("Memory cleanup completed: removed {} items, saved {} bytes in {}ms",
              total_removed, memory_saved, cleanup_time.as_millis());

        Ok(())
    }

    /// Check if memory cleanup is needed and perform it.
    ///
    /// Rate-limits the check to once per `cleanup_interval_seconds`; when
    /// a check is due and the estimated memory pressure exceeds the
    /// configured threshold, a full `cleanup_memory` pass runs.
    pub async fn check_and_cleanup_memory(&self) -> Result<()> {
        let now = SystemTime::now();

        // Decide whether enough time has elapsed since the last check.
        // A clock that appears to have gone backwards (duration_since
        // failing) also triggers a check and resets the timestamp.
        let due = {
            let mut last_check = self.last_memory_check.lock().await;
            match now.duration_since(*last_check) {
                Ok(elapsed) if elapsed.as_secs() >= self.memory_config.cleanup_interval_seconds => {
                    *last_check = now;
                    true
                }
                Ok(_) => false,
                Err(_) => {
                    *last_check = now;
                    true
                }
            }
        };

        if due {
            let usage = self.estimate_memory_usage().await;
            let pressure = usage as f64 / self.memory_config.max_memory_usage as f64;

            if pressure > self.memory_config.memory_pressure_threshold {
                warn!("Memory pressure detected: {:.1}% ({} bytes)",
                      pressure * 100.0, usage);
                self.cleanup_memory().await?;
            }
        }

        Ok(())
    }

    /// Get detailed memory statistics.
    ///
    /// Samples the debouncer queue length first so the two mutexes are
    /// never held at the same time; the returned values are the same
    /// non-atomic snapshot the original implementation produced.
    pub async fn get_memory_stats(&self) -> WatcherMemoryStats {
        let current_usage = self.estimate_memory_usage().await;

        let pending_events = {
            let debouncer = self.debouncer.lock().await;
            debouncer.pending_events.len()
        };

        let metrics = self.metrics.lock().await;
        WatcherMemoryStats {
            current_usage_bytes: current_usage,
            peak_usage_bytes: metrics.peak_memory_usage,
            tracked_files_count: self.tracked_files.len(),
            debouncer_events_count: pending_events,
            memory_pressure: current_usage as f64 / self.memory_config.max_memory_usage as f64,
            cleanup_count: metrics.memory_cleanups,
            last_cleanup: metrics.last_cleanup_time,
        }
    }

    /// Configure memory management settings.
    ///
    /// Replaces the current memory configuration wholesale; the new values
    /// take effect on the next cleanup or pressure check.
    pub fn configure_memory_management(&mut self, config: MemoryConfig) {
        self.memory_config = config;
        info!("Updated memory management configuration");
    }

    /// Estimate current memory usage.
    ///
    /// Coarse heuristic: assumes ~1 KiB of bookkeeping per tracked file.
    /// NOTE(review): this ignores debouncer queues and parse-engine caches;
    /// more precise accounting would need real memory tracking.
    async fn estimate_memory_usage(&self) -> u64 {
        const BYTES_PER_TRACKED_FILE: u64 = 1024;
        self.tracked_files.len() as u64 * BYTES_PER_TRACKED_FILE
    }

    /// Check if a file can be updated safely without conflicts.
    ///
    /// With conflict detection disabled every update is considered safe;
    /// otherwise the incremental engine decides for the next version of
    /// the file (untracked paths start at version 0).
    pub async fn can_update_file_safely(&self, file_path: &std::path::Path) -> Result<bool> {
        if !self.config.enable_conflict_detection {
            return Ok(true);
        }

        // Tracked files carry a stored version; unknown files begin at 0.
        let current_version = self
            .tracked_files
            .get(file_path)
            .map(|metadata| metadata.version)
            .unwrap_or(0);

        // Delegate the actual safety check for the next version.
        let path_str = file_path.to_string_lossy();
        self.parse_engine
            .can_update_safely(&path_str, current_version + 1)
            .await
    }

    /// Force resolve conflicts for a specific file.
    ///
    /// Detects the conflicts the next version of the file would produce
    /// and asks the incremental engine to resolve them; any conflict the
    /// resolver cannot handle becomes a hard error.
    pub async fn force_resolve_conflicts(&self, file_path: &std::path::Path) -> Result<()> {
        let path_str = file_path.to_string_lossy();

        // Tracked files carry a stored version; unknown files begin at 0.
        let current_version = self
            .tracked_files
            .get(file_path)
            .map(|metadata| metadata.version)
            .unwrap_or(0);

        let conflicts = self
            .parse_engine
            .detect_update_conflicts(&path_str, current_version + 1)
            .await?;

        // No conflicts means there is nothing to resolve.
        if conflicts.is_empty() {
            return Ok(());
        }

        info!("Force resolving {} conflicts for file {}", conflicts.len(), file_path.display());
        let resolution = self.parse_engine.resolve_conflicts(conflicts).await?;

        if !resolution.failed_conflicts.is_empty() {
            return Err(crate::CodeGraphError::graph_update_error(
                format!("Failed to resolve {} conflicts for file {}",
                        resolution.failed_conflicts.len(),
                        file_path.display())
            ));
        }

        Ok(())
    }

    // ===== STATE PERSISTENCE AND RECOVERY =====

    /// Save the current watcher state to disk.
    ///
    /// Assembles everything the watcher needs to resume after a restart
    /// (state machine, watched paths, config, per-file metadata) and
    /// writes it as pretty-printed JSON for easy manual inspection.
    pub async fn save_state(&self) -> Result<()> {
        let state_path = self.get_state_file_path();

        let snapshot = WatcherPersistentState {
            state: self.get_state().await,
            watched_paths: self.watched_paths.lock().await.clone(),
            config: self.config.clone(),
            file_metadata: self.get_file_metadata_snapshot().await,
            last_saved: SystemTime::now(),
            session_id: self.session_id.clone(),
        };

        let serialized = serde_json::to_string_pretty(&snapshot)
            .map_err(|e| crate::CodeGraphError::internal_error(format!("Failed to serialize state: {}", e)))?;

        tokio::fs::write(&state_path, serialized).await
            .map_err(|e| crate::CodeGraphError::internal_error(format!("Failed to write state file: {}", e)))?;

        debug!("Saved watcher state to: {:?}", state_path);
        Ok(())
    }

    /// Load watcher state from disk.
    ///
    /// Returns `Ok(false)` when no state file exists (nothing to restore);
    /// otherwise restores the state machine, watched paths, and per-file
    /// metadata, assigns a fresh session ID, and returns `Ok(true)`.
    pub async fn load_state(&mut self) -> Result<bool> {
        let state_path = self.get_state_file_path();

        // An absent state file simply means there is nothing to restore.
        if !state_path.exists() {
            debug!("No state file found at: {:?}", state_path);
            return Ok(false);
        }

        let raw = tokio::fs::read_to_string(&state_path).await
            .map_err(|e| crate::CodeGraphError::internal_error(format!("Failed to read state file: {}", e)))?;

        let saved: WatcherPersistentState = serde_json::from_str(&raw)
            .map_err(|e| crate::CodeGraphError::internal_error(format!("Failed to deserialize state: {}", e)))?;

        // Restore the watcher state machine.
        *self.state.lock().await = saved.state;

        // Restore the list of paths that were being watched.
        *self.watched_paths.lock().await = saved.watched_paths;

        // Re-populate per-file metadata into the live map.
        self.restore_file_metadata(saved.file_metadata).await;

        // A fresh session gets a fresh ID.
        self.session_id = Uuid::new_v4().to_string();

        info!("Loaded watcher state from: {:?}", state_path);
        Ok(true)
    }

    /// Recover from a previous session.
    ///
    /// Loads persisted state if present; when the previous session was
    /// actively watching (running or paused), re-establishes watches on
    /// every previously watched path.
    pub async fn recover_from_previous_session(&mut self) -> Result<()> {
        // Nothing to do when no persisted state could be loaded.
        if !self.load_state().await? {
            return Ok(());
        }

        info!("Recovering from previous session");

        let restored_state = self.get_state().await;
        if matches!(restored_state, WatcherState::Running | WatcherState::Paused) {
            let paths = self.watched_paths.lock().await.clone();
            for path in paths {
                info!("Restarting watch for path: {:?}", path);
                self.start_watching(&path).await?;
            }
        }

        Ok(())
    }

    /// Get the path for the state file.
    ///
    /// Returns `<tmp>/codegraph-watcher/watcher-state.json`, creating the
    /// directory on a best-effort basis.
    ///
    /// The file name is deliberately stable across sessions: the previous
    /// implementation embedded `self.session_id` (a fresh UUID per
    /// process), so `load_state` in a new session could never find the
    /// file written by an earlier session and recovery silently did
    /// nothing. NOTE(review): a stable name means concurrent watcher
    /// processes share one state file — confirm single-instance usage.
    fn get_state_file_path(&self) -> PathBuf {
        let mut state_path = std::env::temp_dir();
        state_path.push("codegraph-watcher");
        // Best-effort: a failure to create the directory surfaces later as
        // a read/write error in save_state/load_state.
        std::fs::create_dir_all(&state_path).ok();
        state_path.push("watcher-state.json");
        state_path
    }

    /// Get a snapshot of file metadata for persistence.
    ///
    /// Clones every (path, metadata) entry out of the concurrent map into
    /// a plain `HashMap` that serde can serialize.
    async fn get_file_metadata_snapshot(&self) -> HashMap<PathBuf, FileMetadata> {
        self.tracked_files
            .iter()
            .map(|entry| (entry.key().clone(), entry.value().clone()))
            .collect()
    }

    /// Restore file metadata from snapshot.
    ///
    /// Merges the snapshot into the live map, overwriting any entry that
    /// already exists for the same path.
    async fn restore_file_metadata(&self, metadata: HashMap<PathBuf, FileMetadata>) {
        metadata.into_iter().for_each(|(path, meta)| {
            self.tracked_files.insert(path, meta);
        });
    }

    /// Clean up state files.
    ///
    /// Removes the watcher's persisted state file if it exists; a missing
    /// file is not an error (there is simply nothing to remove).
    pub async fn cleanup_state_files(&self) -> Result<()> {
        let state_path = self.get_state_file_path();

        if !state_path.exists() {
            return Ok(());
        }

        tokio::fs::remove_file(&state_path).await
            .map_err(|e| crate::CodeGraphError::internal_error(format!("Failed to remove state file: {}", e)))?;
        debug!("Cleaned up state file: {:?}", state_path);
        Ok(())
    }

    /// Get current session ID.
    ///
    /// Borrowed view of the UUID string identifying this watcher session.
    pub fn get_session_id(&self) -> &str {
        self.session_id.as_str()
    }
}

/// Memory usage statistics for the watcher
#[derive(Debug, Clone)]
pub struct WatcherMemoryStats {
    /// Estimated current memory usage in bytes (heuristic, not exact).
    pub current_usage_bytes: u64,
    /// Highest memory usage recorded in the metrics so far, in bytes.
    pub peak_usage_bytes: u64,
    /// Number of files currently tracked by the watcher.
    pub tracked_files_count: usize,
    /// Number of events currently pending in the debouncer.
    pub debouncer_events_count: usize,
    /// Ratio of current usage to the configured maximum (1.0 == at limit).
    pub memory_pressure: f64,
    /// Total number of memory cleanup passes performed.
    pub cleanup_count: u64,
    /// Wall-clock time of the most recent cleanup.
    pub last_cleanup: SystemTime,
}

/// Statistics about the file watcher
#[derive(Debug, Clone)]
pub struct WatcherStats {
    /// Number of files currently tracked by the watcher.
    pub tracked_files_count: usize,
    /// Whether a filesystem watcher is currently installed.
    pub is_watching: bool,
    /// Current lifecycle state of the watcher (copied from its state lock).
    pub state: WatcherState,
    /// Cumulative event count, taken from the watcher metrics.
    pub total_events: u64,
    /// Cumulative file count, taken from the watcher metrics.
    pub total_files: u64,
    /// Average processing time in milliseconds, from the watcher metrics.
    pub avg_processing_time_ms: f64,
    /// Peak estimated memory usage in bytes, from the watcher metrics.
    pub peak_memory_usage: u64,
    /// Number of batch operations performed, from the watcher metrics.
    pub batch_operations: u64,
}
