use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

use crossbeam::channel::{self, Sender};
use dashmap::DashMap;
use ignore::WalkBuilder;
use jwalk::WalkDir;
use parking_lot::{Mutex, RwLock};
use serde::Serialize;
use tokio::sync::broadcast;
use tokio::time::sleep;

use crate::core::system::{get_current_os, OperatingSystem};

/// High-performance file system scanner with O(1) queries and real-time updates
///
/// Architecture:
/// 1. One-time O(N) parallel scan using jwalk/ignore
/// 2. Lock-free path index for O(1) lookups
/// 3. Real-time incremental updates via file system events
/// 4. Atomic aggregated caches (size, count) for instant queries

/// Core node structure with atomic aggregated fields
///
/// The aggregate counters (`size_total`, `file_count`, `dir_count`,
/// `last_modified`) describe the entire subtree rooted at this node and are
/// maintained incrementally with relaxed atomic operations, so readers never
/// need a lock to query them.
#[derive(Debug)]
pub struct FileNode {
    /// Final path component ("" when the path has no file name, e.g. "/")
    pub name: String,
    /// Full path of this entry
    pub path: PathBuf,
    /// File size (for files) or 0 (for directories)
    pub file_size: u64,
    /// Total size including all descendants (atomic for lock-free reads)
    pub size_total: AtomicU64,
    /// Number of files in subtree
    pub file_count: AtomicU64,
    /// Number of directories in subtree
    pub dir_count: AtomicU64,
    /// Last modified time (max in subtree), seconds since UNIX epoch, 0 = unknown
    pub last_modified: AtomicU64,
    /// Child nodes (protected by RwLock for concurrent access)
    pub children: RwLock<Vec<Arc<FileNode>>>,
    /// Whether this is a directory
    pub is_dir: bool,
}

/// Serializable version for frontend
///
/// Mirrors `FileNode` with plain (non-atomic) values. `value` carries the
/// aggregated subtree size so a treemap can size nodes directly. Field order
/// is part of the serialized output — keep it stable.
#[derive(Debug, Serialize)]
pub struct FileNodeForJs {
    pub name: String,
    /// Lossy UTF-8 rendering of the node's path
    pub path: String,
    /// Total subtree size in bytes (maps from `FileNode::size_total`)
    pub value: u64,
    pub file_count: u64,
    pub dir_count: u64,
    /// Seconds since UNIX epoch (0 when unknown)
    pub last_modified: u64,
    pub is_dir: bool,
    pub children: Vec<FileNodeForJs>,
    /// Whether this node has children (for lazy loading)
    pub has_children: bool,
    /// Whether children are loaded
    pub children_loaded: bool,
}

/// Events for real-time updates
///
/// Produced by the scanners and file-system watchers; consumed by
/// `TreeBuilder::process_event`.
#[derive(Debug, Clone)]
pub enum ScanEvent {
    /// New file/directory discovered during scan
    Discovered {
        path: PathBuf,
        /// Size in bytes (0 for directories)
        size: u64,
        is_dir: bool,
        /// Seconds since UNIX epoch (0 when unknown)
        modified: u64,
    },
    /// File system event (create/modify/delete)
    FsEvent { path: PathBuf, kind: FsEventKind },
    /// Scan completed
    ScanComplete,
}

/// Kind of change carried by `ScanEvent::FsEvent`.
#[derive(Debug, Clone)]
pub enum FsEventKind {
    Created,
    Modified,
    Deleted,
}

/// Global state with lock-free design
///
/// Shared between the scan task, the event-processing task, and query
/// handlers; each field is independently synchronized so no outer lock is
/// needed.
pub struct FileSystemState {
    /// O(1) path lookup table
    pub path_index: DashMap<PathBuf, Arc<FileNode>>,
    /// Root node of the tree
    pub root: RwLock<Option<Arc<FileNode>>>,
    /// Current scan status
    pub scanning: AtomicBool,
    /// Should stop current operation
    pub should_stop: AtomicBool,
    /// Event channel for real-time updates
    pub event_sender: Mutex<Option<Sender<ScanEvent>>>,
    /// Currently watched path
    pub watch_path: RwLock<Option<PathBuf>>,
    /// Performance monitoring
    pub performance_monitor: RwLock<Option<Arc<PerformanceMonitor>>>,
}

impl Default for FileSystemState {
    /// Start out empty: no index entries, no root, no active scan, no
    /// watcher, and no performance monitor attached.
    fn default() -> Self {
        Self {
            path_index: DashMap::default(),
            root: RwLock::default(),
            scanning: AtomicBool::default(),
            should_stop: AtomicBool::default(),
            event_sender: Mutex::default(),
            watch_path: RwLock::default(),
            performance_monitor: RwLock::default(),
        }
    }
}

/// Global state instance (lazily initialized on first access)
static FILE_SYSTEM: once_cell::sync::Lazy<FileSystemState> =
    once_cell::sync::Lazy::new(FileSystemState::default);

impl FileNode {
    /// Create a new node for `path`.
    ///
    /// `size` is the byte size for files (0 for directories); `modified` is a
    /// UNIX timestamp in seconds (0 when unknown). The aggregate counters
    /// start out describing just this node and grow as children are attached.
    pub fn new(path: PathBuf, size: u64, is_dir: bool, modified: u64) -> Self {
        // Root-like paths ("/") have no file_name(); fall back to "".
        let name = path
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("")
            .to_string();

        Self {
            name,
            path,
            file_size: size,
            size_total: AtomicU64::new(size),
            file_count: AtomicU64::new(if is_dir { 0 } else { 1 }),
            dir_count: AtomicU64::new(if is_dir { 1 } else { 0 }),
            last_modified: AtomicU64::new(modified),
            children: RwLock::new(Vec::new()),
            is_dir,
        }
    }

    /// Add a child node and fold its aggregates into this node's counters.
    pub fn add_child(&self, child: Arc<FileNode>) {
        // Snapshot the child's aggregates once.
        let child_size = child.size_total.load(Ordering::Relaxed);
        let child_files = child.file_count.load(Ordering::Relaxed);
        let child_dirs = child.dir_count.load(Ordering::Relaxed);
        let child_modified = child.last_modified.load(Ordering::Relaxed);

        self.size_total.fetch_add(child_size, Ordering::Relaxed);
        self.file_count.fetch_add(child_files, Ordering::Relaxed);
        self.dir_count.fetch_add(child_dirs, Ordering::Relaxed);

        // fetch_max performs the read-compare-write atomically; the previous
        // load/compare/store sequence could lose a concurrent update. Also
        // matches the fetch_max usage elsewhere in this file.
        self.last_modified.fetch_max(child_modified, Ordering::Relaxed);

        // Add to children
        self.children.write().push(child);
    }

    /// Remove the child with the given `path` and subtract its aggregates.
    ///
    /// Returns the removed node, or `None` if no child matched. Note that
    /// `last_modified` is intentionally not recomputed (a max cannot be
    /// cheaply decreased), so it may stay higher than the remaining subtree.
    pub fn remove_child(&self, path: &Path) -> Option<Arc<FileNode>> {
        let mut children = self.children.write();
        if let Some(pos) = children.iter().position(|c| c.path == path) {
            let removed = children.remove(pos);

            // Update aggregated values (subtract)
            let child_size = removed.size_total.load(Ordering::Relaxed);
            let child_files = removed.file_count.load(Ordering::Relaxed);
            let child_dirs = removed.dir_count.load(Ordering::Relaxed);

            self.size_total.fetch_sub(child_size, Ordering::Relaxed);
            self.file_count.fetch_sub(child_files, Ordering::Relaxed);
            self.dir_count.fetch_sub(child_dirs, Ordering::Relaxed);

            Some(removed)
        } else {
            None
        }
    }

    /// Get current aggregated values (lock-free read).
    ///
    /// Returns `(size_total, file_count, dir_count, last_modified)`.
    pub fn get_stats(&self) -> (u64, u64, u64, u64) {
        (
            self.size_total.load(Ordering::Relaxed),
            self.file_count.load(Ordering::Relaxed),
            self.dir_count.load(Ordering::Relaxed),
            self.last_modified.load(Ordering::Relaxed),
        )
    }
}

impl FileNodeForJs {
    /// Convert FileNode to FileNodeForJs with depth limit for lazy loading,
    /// using the default lazy-load configuration.
    pub fn from_node_with_depth(node: &FileNode, max_depth: usize, current_depth: usize) -> Self {
        Self::from_node_with_config(node, max_depth, current_depth, &LazyLoadConfig::default())
    }

    /// Convert FileNode to FileNodeForJs with configuration.
    ///
    /// Descends at most `max_depth` levels. Per level, children are filtered
    /// by the size threshold (directories always pass), capped at
    /// `max_children_per_level`, and sorted largest-first for display.
    pub fn from_node_with_config(
        node: &FileNode,
        max_depth: usize,
        current_depth: usize,
        config: &LazyLoadConfig,
    ) -> Self {
        let (size_total, file_count, dir_count, last_modified) = node.get_stats();

        let guard = node.children.read();
        let has_children = !guard.is_empty();

        // Children are materialized only while we are above the depth limit.
        let children_loaded = current_depth < max_depth;
        let mut converted: Vec<FileNodeForJs> = Vec::new();

        if children_loaded {
            for child in guard.iter() {
                // Cap the number of children rendered at this level.
                if converted.len() >= config.max_children_per_level {
                    break;
                }

                // Hide entries below the size threshold; directories always pass.
                let (child_size, _, _, _) = child.get_stats();
                if child_size < config.min_size_threshold && !child.is_dir {
                    continue;
                }

                converted.push(FileNodeForJs::from_node_with_config(
                    child.as_ref(),
                    max_depth,
                    current_depth + 1,
                    config,
                ));
            }

            // Largest entries first for better UX.
            converted.sort_by(|a, b| b.value.cmp(&a.value));
        }

        // For the treemap, `value` is the TOTAL subtree size — including
        // children that were filtered out or not loaded above — so parents
        // are always sized correctly regardless of what is visible.
        FileNodeForJs {
            name: node.name.clone(),
            path: node.path.to_string_lossy().to_string(),
            value: size_total,
            file_count,
            dir_count,
            last_modified,
            is_dir: node.is_dir,
            children: converted,
            has_children,
            children_loaded,
        }
    }
}

impl From<&FileNode> for FileNodeForJs {
    fn from(node: &FileNode) -> Self {
        // Default behavior: load all children (backward compatibility)
        FileNodeForJs::from_node_with_depth(node, usize::MAX, 0)
    }
}

/// High-performance directory scanner using parallel traversal
///
/// Streams `ScanEvent`s over a crossbeam channel; cancellation is
/// cooperative via the shared `should_stop` flag.
pub struct HighPerformanceScanner {
    /// Channel for sending scan events
    event_sender: Sender<ScanEvent>,
    /// Should stop scanning
    should_stop: Arc<AtomicBool>,
}

impl HighPerformanceScanner {
    /// Create a scanner that reports over `event_sender` and honors the
    /// shared `should_stop` flag for cooperative cancellation.
    pub fn new(event_sender: Sender<ScanEvent>, should_stop: Arc<AtomicBool>) -> Self {
        Self {
            event_sender,
            should_stop,
        }
    }

    /// Scan directory using jwalk for maximum parallel I/O performance.
    ///
    /// Emits one `ScanEvent::Discovered` per entry (buffered in batches to
    /// amortize channel overhead) and ends with `ScanEvent::ScanComplete`.
    ///
    /// # Errors
    /// Currently always returns `Ok(())`; unreadable entries are skipped.
    pub fn scan_with_jwalk(
        &self,
        root_path: &Path,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let sender = self.event_sender.clone();
        let should_stop = Arc::clone(&self.should_stop);

        // Configure jwalk with optimized settings
        let walker = WalkDir::new(root_path)
            .skip_hidden(false) // Include hidden files
            .follow_links(false) // Don't follow symlinks to avoid cycles
            .sort(false); // Don't sort for maximum speed

        // Events are buffered and flushed in batches. The Vec's length is the
        // batch counter — the previous separate `batch_count` variable
        // duplicated it and could drift out of sync.
        const BATCH_SIZE: usize = 1000;
        let mut batch_events = Vec::with_capacity(BATCH_SIZE);

        for entry_result in walker {
            if should_stop.load(Ordering::Relaxed) {
                break;
            }

            // Skip entries that could not be read.
            let entry = match entry_result {
                Ok(e) => e,
                Err(_) => continue,
            };

            let path = entry.path();

            // Early filtering of paths we definitely don't want to include
            if should_exclude_path(&path) {
                continue;
            }

            // Determine whether this entry is a directory
            let is_dir = entry.file_type().is_dir();

            // Directories are recorded with size 0 / mtime 0 (their aggregates
            // are filled in by the tree builder); files use jwalk's metadata.
            let (size, modified) = if is_dir {
                (0u64, 0u64)
            } else {
                match entry.metadata() {
                    Ok(md) => {
                        let modified = md
                            .modified()
                            .ok()
                            .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
                            .map(|d| d.as_secs())
                            .unwrap_or(0);
                        (md.len(), modified)
                    }
                    Err(_) => (0u64, 0u64),
                }
            };

            // Add to batch
            batch_events.push(ScanEvent::Discovered {
                path: path.to_path_buf(),
                size,
                is_dir,
                modified,
            });

            // Flush a full batch. A failed send means the receiver is gone,
            // so request a stop; drain() clears the buffer either way.
            if batch_events.len() >= BATCH_SIZE {
                for event in batch_events.drain(..) {
                    if sender.send(event).is_err() {
                        should_stop.store(true, Ordering::Relaxed);
                        break;
                    }
                }
            }
        }

        // Flush whatever is left in the final partial batch.
        for event in batch_events.drain(..) {
            if sender.send(event).is_err() {
                break;
            }
        }

        // Best-effort completion notification.
        let _ = sender.send(ScanEvent::ScanComplete);
        Ok(())
    }

    /// Alternative: scan using ignore crate (better for respecting .gitignore).
    ///
    /// Runs a multi-threaded walk; each worker sends events directly (no
    /// batching) and quits early when `should_stop` is set or the receiver
    /// has gone away.
    pub fn scan_with_ignore(
        &self,
        root_path: &Path,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let sender = self.event_sender.clone();
        let should_stop = Arc::clone(&self.should_stop);

        let mut builder = WalkBuilder::new(root_path);
        builder
            .hidden(false) // Include hidden files
            .ignore(true) // Respect .gitignore
            .git_ignore(true) // Respect .gitignore
            .git_global(false) // Don't use global git ignore
            .git_exclude(false) // Don't use git exclude
            .threads(num_cpus::get()); // Use all CPU cores

        let walker = builder.build_parallel();

        walker.run(|| {
            // Each worker thread gets its own sender/flag handles.
            let sender = sender.clone();
            let should_stop = Arc::clone(&should_stop);

            Box::new(move |entry| {
                if should_stop.load(Ordering::Relaxed) {
                    return ignore::WalkState::Quit;
                }

                let entry = match entry {
                    Ok(entry) => entry,
                    Err(_) => return ignore::WalkState::Continue,
                };

                let path = entry.path();
                // Skip excluded paths and everything beneath them.
                if should_exclude_path(path) {
                    return ignore::WalkState::Skip;
                }

                if let Ok(metadata) = entry.metadata() {
                    let size = if metadata.is_file() {
                        metadata.len()
                    } else {
                        0
                    };
                    let modified = metadata
                        .modified()
                        .ok()
                        .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
                        .map(|d| d.as_secs())
                        .unwrap_or(0);

                    let event = ScanEvent::Discovered {
                        path: path.to_path_buf(),
                        size,
                        is_dir: metadata.is_dir(),
                        modified,
                    };

                    // Receiver gone — stop all workers.
                    if sender.send(event).is_err() {
                        return ignore::WalkState::Quit;
                    }
                }

                ignore::WalkState::Continue
            })
        });

        let _ = sender.send(ScanEvent::ScanComplete);
        Ok(())
    }
}

/// Fast path exclusion check (optimized for early filtering)
///
/// Filters out virtual filesystems and OS-internal locations per platform.
/// Non-UTF-8 paths are never excluded.
fn should_exclude_path(path: &Path) -> bool {
    let os = get_current_os();

    // Convert to string once for all subsequent checks.
    let path_str = match path.to_str() {
        Some(s) => s,
        None => return false, // Invalid UTF-8, don't exclude
    };

    match os {
        OperatingSystem::Linux => {
            // Virtual filesystems and volatile system directories.
            ["/proc", "/sys", "/dev", "/run", "/tmp"]
                .iter()
                .any(|prefix| path_str.starts_with(prefix))
        }
        OperatingSystem::MacOS => {
            // System volumes and known-problematic locations.
            let by_prefix = ["/System/Volumes", "/private/var/vm", "/Volumes"]
                .iter()
                .any(|prefix| path_str.starts_with(prefix));
            let by_substring = ["/Library/Group Containers", "/.Trash"]
                .iter()
                .any(|needle| path_str.contains(needle));
            by_prefix || by_substring
        }
        OperatingSystem::Windows => {
            // Case-insensitive match against Windows system locations.
            let lower = path_str.to_lowercase();
            [
                "\\windows\\winsxs",
                "\\system volume information",
                "\\$recycle.bin",
                "\\pagefile.sys",
                "\\hiberfil.sys",
            ]
            .iter()
            .any(|needle| lower.contains(needle))
        }
        _ => false,
    }
}

/// Tree builder that processes scan events and builds the in-memory tree
///
/// Mutating methods take `&mut self`, so a single event-processing task
/// drives it while readers query nodes through the atomic aggregates.
pub struct TreeBuilder {
    /// Root node of the tree
    root: Option<Arc<FileNode>>,
    /// Path to node mapping for O(1) lookups
    path_index: DashMap<PathBuf, Arc<FileNode>>,
}

impl TreeBuilder {
    /// Create an empty builder with no root and an empty path index.
    pub fn new() -> Self {
        Self {
            root: None,
            path_index: DashMap::new(),
        }
    }

    /// Process a scan event and update the tree.
    ///
    /// # Errors
    /// Propagates failures from node insertion / event handling.
    pub fn process_event(
        &mut self,
        event: ScanEvent,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match event {
            ScanEvent::Discovered {
                path,
                size,
                is_dir,
                modified,
            } => {
                self.add_node(path, size, is_dir, modified)?;
            }
            ScanEvent::FsEvent { path, kind } => {
                self.handle_fs_event(path, kind)?;
            }
            ScanEvent::ScanComplete => {
                // Scan completed, could trigger UI update
            }
        }
        Ok(())
    }

    /// Attach `child` under `parent` and propagate the child's aggregates to
    /// every ancestor strictly above `parent_path` (the parent itself is
    /// updated inside `add_child`). Extracted to remove the three copies of
    /// this logic that previously lived in `add_node`/`ensure_parent_chain`.
    fn link_child(&self, parent: &FileNode, parent_path: &Path, child: &Arc<FileNode>) {
        let (child_size, child_files, child_dirs, child_modified) = child.get_stats();
        parent.add_child(Arc::clone(child));
        let modified_opt = if child_modified > 0 {
            Some(child_modified)
        } else {
            None
        };
        self.update_aggregates_up_tree(
            parent_path,
            child_size as i64,
            child_files as i64,
            child_dirs as i64,
            modified_opt,
        );
    }

    /// Add a new node to the tree with O(1) parent lookup.
    fn add_node(
        &mut self,
        path: PathBuf,
        size: u64,
        is_dir: bool,
        modified: u64,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        // The first discovered entry becomes the root.
        if self.root.is_none() {
            let node = Arc::new(FileNode::new(path.clone(), size, is_dir, modified));
            self.root = Some(Arc::clone(&node));
            self.path_index.insert(path, node);
            return Ok(());
        }

        // The node may already exist as a placeholder created by
        // ensure_parent_chain and is now being discovered by the real scan.
        if let Some(existing_node) = self.path_index.get(&path) {
            if is_dir {
                // Keep the aggregated counters (they may already include
                // child data). Merge the real mtime with fetch_max: a plain
                // `store` could move the subtree maximum backwards if children
                // had already propagated a larger timestamp.
                existing_node
                    .last_modified
                    .fetch_max(modified, Ordering::Relaxed);
                return Ok(());
            } else {
                // This shouldn't happen for files, but handle it gracefully
                eprintln!("Warning: File already exists in tree: {}", path.display());
                return Ok(());
            }
        }

        // Create the new node and index it.
        let node = Arc::new(FileNode::new(path.clone(), size, is_dir, modified));
        self.path_index.insert(path.clone(), Arc::clone(&node));

        // Attach to the parent, creating any missing ancestors first.
        if let Some(parent_path) = path.parent() {
            let parent_path_buf = parent_path.to_path_buf();

            self.ensure_parent_chain(&parent_path_buf, modified)?;

            if let Some(parent_node) = self.path_index.get(&parent_path_buf) {
                self.link_child(&parent_node, &parent_path_buf, &node);
            } else {
                // This should not happen if ensure_parent_chain worked correctly
                eprintln!(
                    "Warning: Parent node not found after ensure_parent_chain: {}",
                    parent_path_buf.display()
                );

                // Fallback: create the parent directly. Note: this parent is
                // not connected upward here; the scan should discover it later.
                let parent_node =
                    Arc::new(FileNode::new(parent_path_buf.clone(), 0, true, modified));
                self.path_index
                    .insert(parent_path_buf.clone(), Arc::clone(&parent_node));
                self.link_child(&parent_node, &parent_path_buf, &node);
            }
        }

        Ok(())
    }

    /// Ensure all parents in the chain exist (for sparse tree building).
    ///
    /// Missing ancestors are created as empty directory placeholders (size 0,
    /// mtime inherited from the triggering entry) and linked to their own
    /// parents so the aggregates stay consistent.
    fn ensure_parent_chain(
        &mut self,
        path: &Path,
        modified: u64,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        // Walk upward collecting missing ancestors; stop at the first one
        // that already exists.
        let mut missing_parents = Vec::new();
        let mut current_path = path;

        while let Some(parent_path) = current_path.parent() {
            if !self.path_index.contains_key(parent_path) {
                missing_parents.push(parent_path.to_path_buf());
            } else {
                break;
            }
            current_path = parent_path;
        }

        // Create missing parents top-down so each node's own parent already
        // exists by the time it is linked.
        missing_parents.reverse();

        for parent_path in missing_parents {
            // Create and index the placeholder directory.
            let parent_node = Arc::new(FileNode::new(parent_path.clone(), 0, true, modified));
            self.path_index
                .insert(parent_path.clone(), Arc::clone(&parent_node));

            // Connect to its parent (which should exist now).
            if let Some(grandparent_path) = parent_path.parent() {
                if let Some(grandparent) = self.path_index.get(grandparent_path) {
                    self.link_child(&grandparent, grandparent_path, &parent_node);
                } else if grandparent_path
                    == self
                        .root
                        .as_ref()
                        .map(|r| r.path.as_path())
                        .unwrap_or(Path::new(""))
                {
                    // Special case: connecting directly to the root node.
                    if let Some(root) = &self.root {
                        self.link_child(root, grandparent_path, &parent_node);
                    }
                }
            }
        }

        Ok(())
    }

    /// Handle file system events for real-time updates.
    fn handle_fs_event(
        &mut self,
        path: PathBuf,
        kind: FsEventKind,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match kind {
            FsEventKind::Created => {
                // Stat the new entry and insert it; entries that vanish
                // before we can stat them are silently ignored (best effort).
                if let Ok(metadata) = std::fs::metadata(&path) {
                    let size = if metadata.is_file() {
                        metadata.len()
                    } else {
                        0
                    };
                    let modified = metadata
                        .modified()
                        .ok()
                        .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
                        .map(|d| d.as_secs())
                        .unwrap_or(0);
                    self.add_node(path, size, metadata.is_dir(), modified)?;
                }
            }
            FsEventKind::Modified => {
                // Re-stat the file and apply the size delta and new mtime to
                // this node and all ancestors.
                if let Some(node) = self.path_index.get(&path) {
                    if let Ok(metadata) = std::fs::metadata(&path) {
                        if metadata.is_file() {
                            let new_size = metadata.len();
                            // For a file (no children) size_total is its size.
                            let current_total = node.size_total.load(Ordering::Relaxed) as i64;
                            let size_diff = new_size as i64 - current_total;
                            let modified_ts = metadata
                                .modified()
                                .ok()
                                .and_then(|t| t.duration_since(std::time::UNIX_EPOCH).ok())
                                .map(|d| d.as_secs());

                            Self::adjust_counter(&node.size_total, size_diff);
                            if let Some(ts) = modified_ts {
                                node.last_modified.fetch_max(ts, Ordering::Relaxed);
                            }

                            // Fix: propagate unconditionally so a timestamp
                            // change with a zero size delta still reaches the
                            // ancestors (the helper returns early when there
                            // is truly nothing to do).
                            self.update_aggregates_up_tree(&path, size_diff, 0, 0, modified_ts);
                        }
                    }
                }
            }
            FsEventKind::Deleted => {
                // Remove from tree and update parent sizes
                if let Some((_, node)) = self.path_index.remove(&path) {
                    let size_to_subtract = node.size_total.load(Ordering::Relaxed) as i64;
                    let files_to_subtract = node.file_count.load(Ordering::Relaxed) as i64;
                    let dirs_to_subtract = node.dir_count.load(Ordering::Relaxed) as i64;

                    // Fix: deleting a directory previously left all of its
                    // descendants in the path index (stale lookups and leaked
                    // Arcs). Purge every indexed path below the deleted one.
                    if node.is_dir {
                        self.path_index
                            .retain(|indexed_path, _| !indexed_path.starts_with(&path));
                    }

                    // Remove from parent and update sizes up the tree
                    if let Some(parent_path) = path.parent() {
                        if let Some(parent) = self.path_index.get(parent_path) {
                            parent.remove_child(&path);
                        }
                        self.update_aggregates_up_tree(
                            parent_path,
                            -size_to_subtract,
                            -files_to_subtract,
                            -dirs_to_subtract,
                            None,
                        );
                    }
                }
            }
        }
        Ok(())
    }

    /// Apply a signed difference to an atomic counter while avoiding underflow
    /// from casting a negative value to u64.
    fn adjust_counter(counter: &AtomicU64, diff: i64) {
        if diff > 0 {
            counter.fetch_add(diff as u64, Ordering::Relaxed);
        } else if diff < 0 {
            counter.fetch_sub(diff.unsigned_abs(), Ordering::Relaxed);
        }
    }

    /// Propagate aggregate changes to ancestors (excluding the starting path itself).
    fn update_aggregates_up_tree(
        &self,
        mut path: &Path,
        size_diff: i64,
        file_diff: i64,
        dir_diff: i64,
        modified: Option<u64>,
    ) {
        // Nothing to propagate.
        if size_diff == 0 && file_diff == 0 && dir_diff == 0 && modified.is_none() {
            return;
        }

        while let Some(parent_path) = path.parent() {
            if let Some(parent) = self.path_index.get(parent_path) {
                Self::adjust_counter(&parent.size_total, size_diff);
                Self::adjust_counter(&parent.file_count, file_diff);
                Self::adjust_counter(&parent.dir_count, dir_diff);
                if let Some(ts) = modified {
                    parent.last_modified.fetch_max(ts, Ordering::Relaxed);
                }
            }
            path = parent_path;
        }
    }

    /// Get the root node.
    pub fn get_root(&self) -> Option<Arc<FileNode>> {
        self.root.clone()
    }

    /// Get node by path (O(1) lookup).
    pub fn get_node(&self, path: &Path) -> Option<Arc<FileNode>> {
        self.path_index.get(path).map(|entry| Arc::clone(&*entry))
    }

    /// Get all paths in the index.
    pub fn get_all_paths(&self) -> Vec<PathBuf> {
        self.path_index
            .iter()
            .map(|entry| entry.key().clone())
            .collect()
    }
}

/// High-performance scan implementation with real-time updates.
///
/// Orchestrates three concurrent tasks:
/// 1. a blocking walker (jwalk first, falling back to the `ignore` crate),
/// 2. an event processor that builds the tree and mirrors it into the
///    global `FILE_SYSTEM` state in 500 ms batches,
/// 3. a 2-second timer that pushes progress frames to WebSocket clients.
///
/// After all three finish, aggregates are repaired bottom-up once and a
/// final completion notification is broadcast.
async fn perform_high_performance_scan(
    root_path: PathBuf,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Unbounded channel carrying ScanEvents from the walker to the processor.
    let (event_sender, event_receiver) = channel::unbounded();

    // Publish scan state globally so commands and fs-event handlers see it.
    FILE_SYSTEM.scanning.store(true, Ordering::Relaxed);
    FILE_SYSTEM.should_stop.store(false, Ordering::Relaxed);
    *FILE_SYSTEM.watch_path.write() = Some(root_path.clone());
    *FILE_SYSTEM.event_sender.lock() = Some(event_sender.clone());

    // Fresh performance monitor for this scan, shared via the global slot.
    let performance_monitor = Arc::new(PerformanceMonitor::new());
    *FILE_SYSTEM.performance_monitor.write() = Some(Arc::clone(&performance_monitor));

    // Drop any data left over from a previous scan.
    FILE_SYSTEM.path_index.clear();
    *FILE_SYSTEM.root.write() = None;

    // Create scanner and tree builder.
    // NOTE(review): the scanner receives a freshly-created AtomicBool here,
    // not FILE_SYSTEM.should_stop — confirm stop requests actually reach
    // the walker thread.
    let scanner = HighPerformanceScanner::new(event_sender, Arc::new(AtomicBool::new(false)));
    let mut tree_builder = TreeBuilder::new();

    // Start scanning on a blocking thread (walking is filesystem-bound).
    let scan_path = root_path.clone();
    let scan_handle = tokio::task::spawn_blocking(move || {
        // Try jwalk first (fastest for deep directories)
        if let Err(_) = scanner.scan_with_jwalk(&scan_path) {
            // Fallback to ignore crate
            scanner.scan_with_ignore(&scan_path)
        } else {
            Ok(())
        }
    });

    // Process events and mirror tree state into the globals.
    let perf_monitor = Arc::clone(&performance_monitor);
    let process_handle = tokio::task::spawn(async move {
        let mut last_update_time = std::time::Instant::now();
        let mut last_transferred_count = 0; // How many index entries were already mirrored
        const MIN_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(500);

        // NOTE(review): crossbeam's `recv()` blocks the async worker thread;
        // consider spawn_blocking or an async channel if this starves tokio.
        while let Ok(event) = event_receiver.recv() {
            let is_complete = matches!(event, ScanEvent::ScanComplete);

            // Feed discovery events into the performance counters.
            if let ScanEvent::Discovered { size, is_dir, .. } = &event {
                perf_monitor.increment_node(*size, !is_dir);
                perf_monitor.update_rates();
            }

            if let Err(e) = tree_builder.process_event(event) {
                eprintln!("Error processing scan event: {}", e);
                continue;
            }

            let now = std::time::Instant::now();
            let elapsed_since_update = now.duration_since(last_update_time);

            // Batch global-state updates: at most every 500 ms, plus one
            // final flush when the scan completes.
            let should_update = elapsed_since_update >= MIN_UPDATE_INTERVAL || is_complete;

            if should_update {
                last_update_time = now;

                // Update root
                if let Some(root) = tree_builder.get_root() {
                    *FILE_SYSTEM.root.write() = Some(Arc::clone(&root));
                }

                // Incremental update: only transfer new nodes since last update.
                // NOTE(review): DashMap iteration order is not insertion
                // order, so `skip(count)` is only a heuristic; correctness is
                // preserved by the contains_key guard here plus the full
                // transfer after the loop.
                let current_count = tree_builder.path_index.len();
                if current_count > last_transferred_count {
                    for entry in tree_builder.path_index.iter().skip(last_transferred_count) {
                        if !FILE_SYSTEM.path_index.contains_key(entry.key()) {
                            FILE_SYSTEM
                                .path_index
                                .insert(entry.key().clone(), Arc::clone(&*entry));
                        }
                    }
                    last_transferred_count = current_count;
                }

                // Performance logging removed for production
            }

            if is_complete {
                break;
            }
        }

        // Final comprehensive update to ensure all data is transferred
        if let Some(root) = tree_builder.get_root() {
            *FILE_SYSTEM.root.write() = Some(root);
        }

        // Final comprehensive update of path index
        for entry in tree_builder.path_index.iter() {
            FILE_SYSTEM
                .path_index
                .insert(entry.key().clone(), Arc::clone(&*entry));
        }

        // Mark scanning as complete so that the WebSocket update task can terminate promptly.
        FILE_SYSTEM.scanning.store(false, Ordering::Relaxed);
    });

    // Separate task for timed WebSocket updates
    let update_handle = tokio::task::spawn(async move {
        const UPDATE_INTERVAL: Duration = Duration::from_millis(2000); // Update every 2 seconds
        let mut interval = tokio::time::interval(UPDATE_INTERVAL);

        loop {
            interval.tick().await;

            // Exit once the processor clears the scanning flag.
            if !FILE_SYSTEM.scanning.load(Ordering::Relaxed) {
                break;
            }

            // Send progress update
            if let Err(e) = notify_scan_progress().await {
                eprintln!("Error notifying scan progress: {}", e);
            }
        }
    });

    // Wait for all tasks to complete
    let (scan_result, process_result, update_result) =
        tokio::join!(scan_handle, process_handle, update_handle);

    if let Err(e) = scan_result {
        eprintln!("Scan task error: {}", e);
    }

    if let Err(e) = process_result {
        eprintln!("Process task error: {}", e);
    }

    if let Err(e) = update_result {
        eprintln!("Update task error: {}", e);
    }

    // Repair aggregated values bottom-up in a single final pass.
    if let Err(e) = fix_size_aggregations() {
        eprintln!("Aggregation fix error: {}", e);
    }

    // Send final notification
    if let Err(e) = notify_scan_complete().await {
        eprintln!("Error notifying scan complete: {}", e);
    }

    Ok(())
}

/// Start high-performance folder scanning.
///
/// Validates `path`, tears down any scan already in progress, and launches
/// the new scan in the background; the command returns immediately.
#[tauri::command]
pub async fn start_scan_folder(path: String) -> Result<(), String> {
    // Tear down any scan that is already running before starting a new one.
    stop_scan_folder_and_clear().await;

    let root_path = PathBuf::from(path);

    // Reject paths that are missing or are not directories.
    if !root_path.exists() {
        return Err(format!("Path does not exist: {}", root_path.display()));
    }
    if !root_path.is_dir() {
        return Err(format!("Path is not a directory: {}", root_path.display()));
    }

    // Fire-and-forget: the scan runs in the background.
    tokio::spawn(async move {
        if let Err(e) = perform_high_performance_scan(root_path).await {
            eprintln!("Scan failed: {}", e);
            // Clear the scanning flag so clients do not hang on error.
            FILE_SYSTEM.scanning.store(false, Ordering::Relaxed);
        }
    });

    Ok(())
}

/// Get folder information with O(1) performance.
///
/// Convenience wrapper over [`get_folder_info_with_depth`] with a shallow
/// default depth.
#[tauri::command]
pub fn get_folder_info() -> Vec<FileNodeForJs> {
    // Two levels keeps the initial payload small and the UI responsive.
    const DEFAULT_DEPTH: usize = 2;
    get_folder_info_with_depth(DEFAULT_DEPTH)
}

/// Configuration for lazy loading of tree nodes sent to the frontend.
#[derive(Debug, Serialize)]
pub struct LazyLoadConfig {
    /// How many tree levels to serialize by default.
    pub default_depth: usize,
    /// Cap on children returned per level, to prevent UI overload.
    pub max_children_per_level: usize,
    /// Only show file nodes at or above this size in bytes; directories
    /// pass the filter regardless (see `get_children_by_path`).
    pub min_size_threshold: u64,
}

impl Default for LazyLoadConfig {
    fn default() -> Self {
        Self {
            default_depth: 5,
            max_children_per_level: 50, // Limit children to prevent UI overload
            min_size_threshold: 10 * 1024, // 10KB minimum (much more reasonable)
        }
    }
}

/// Get folder information serialized down to `max_depth` levels.
///
/// Returns an empty list when no scan has produced a root yet.
#[tauri::command]
pub fn get_folder_info_with_depth(max_depth: usize) -> Vec<FileNodeForJs> {
    let guard = FILE_SYSTEM.root.read();
    match guard.as_ref() {
        Some(root) => FileNodeForJs::from_node_with_depth(root.as_ref(), max_depth, 0).children,
        None => Vec::new(),
    }
}

/// Get children of a specific path for lazy loading
#[tauri::command]
pub fn get_children_by_path(path: String, max_depth: usize) -> Result<Vec<FileNodeForJs>, String> {
    let path_buf = PathBuf::from(path.clone());

    if let Some(node) = FILE_SYSTEM.path_index.get(&path_buf) {
        let children = node.children.read();
        let config = LazyLoadConfig::default();
        let children_js: Vec<FileNodeForJs> = children
            .iter()
            .filter(|child| {
                // Apply same filtering as in from_node_with_config
                let (child_size, _, _, _) = child.get_stats();
                child_size >= config.min_size_threshold || child.is_dir
            })
            .take(config.max_children_per_level)
            .map(|child| FileNodeForJs::from_node_with_depth(child.as_ref(), max_depth, 0))
            .collect();
        Ok(children_js)
    } else {
        Err(format!("Path not found: {}", path))
    }
}

/// Expose the lazy-loading defaults to the frontend.
#[tauri::command]
pub fn get_lazy_load_config() -> LazyLoadConfig {
    Default::default()
}

/// Report whether a background scan is currently in progress.
#[tauri::command]
pub fn is_scanning() -> bool {
    FILE_SYSTEM.scanning.load(Ordering::Relaxed)
}

#[tauri::command]
pub fn get_recommend_folders(current_path: String) -> Result<Vec<String>, String> {
    let path = std::path::Path::new(&current_path);
    let dir_to_read = if path.exists() {
        path.to_path_buf()
    } else {
        if let Some(parent) = path.parent() {
            parent.to_path_buf()
        } else {
            path.to_path_buf()
        }
    };
    let dir_to_read = dir_to_read
        .canonicalize()
        .map_err(|e| format!("Error canonicalizing path: {}", e))?;
    if !dir_to_read.exists() {
        return Err(format!(
            "Path does not exist: {}",
            dir_to_read.to_str().unwrap()
        ));
    }

    let entries = std::fs::read_dir(&dir_to_read)
        .or_else(|_| std::fs::read_dir(dir_to_read.parent().unwrap_or(&dir_to_read)))
        .map_err(|e| format!("Error reading directory: {}", e))?;

    let lowercase_current_path = current_path.to_lowercase();
    let result: Vec<String> = entries
        .filter_map(|entry| {
            entry.ok().and_then(|entry| {
                let entry_path = entry.path();
                let path_str = entry_path.to_str()?;
                if entry_path.is_dir()
                    && path_str.to_lowercase().starts_with(&lowercase_current_path)
                {
                    Some(path_str.to_string())
                } else {
                    None
                }
            })
        })
        .collect();

    Ok(result)
}

/// Stop scanning and clear all data.
///
/// Sets the stop/scanning flags first so concurrent tasks polling them can
/// bail out, then tears down every piece of global scan state, and finally
/// yields briefly so background tasks have a chance to observe the flags.
#[tauri::command]
pub async fn stop_scan_folder_and_clear() {
    // Signal stop. The progress task polls `scanning`; `should_stop` is the
    // walker-facing flag.
    // NOTE(review): the scanner in perform_high_performance_scan is built
    // with its own fresh AtomicBool — confirm this global flag actually
    // reaches the walker.
    FILE_SYSTEM.should_stop.store(true, Ordering::Relaxed);
    FILE_SYSTEM.scanning.store(false, Ordering::Relaxed);

    // Clear state
    *FILE_SYSTEM.watch_path.write() = None;
    *FILE_SYSTEM.root.write() = None;
    FILE_SYSTEM.path_index.clear();
    // Dropping the sender closes the channel, which ends any receiver loop
    // still draining events.
    *FILE_SYSTEM.event_sender.lock() = None;

    // Give some time for background tasks to stop
    sleep(Duration::from_millis(100)).await;
}

/// Delete a file or directory from disk and drop it from the in-memory index.
///
/// # Errors
/// Returns a message string when the filesystem removal fails.
#[tauri::command]
pub async fn delete_path(path: String) -> Result<(), String> {
    let path = Path::new(&path);

    if path.is_dir() {
        std::fs::remove_dir_all(path).map_err(|e| format!("Failed to delete directory: {}", e))?;
    } else {
        std::fs::remove_file(path).map_err(|e| format!("Failed to delete file: {}", e))?;
    }

    // Remove the node itself and, for directories, every descendant —
    // previously only the exact path was removed, leaving stale descendant
    // entries in the O(1) lookup index after a directory delete.
    let path_buf = path.to_path_buf();
    if FILE_SYSTEM.path_index.remove(&path_buf).is_some() {
        FILE_SYSTEM
            .path_index
            .retain(|key, _| !key.starts_with(&path_buf));

        // Detach from the parent so its aggregates get updated.
        if let Some(parent_path) = path.parent() {
            if let Some(parent) = FILE_SYSTEM.path_index.get(parent_path) {
                parent.remove_child(&path_buf);
            }
        }
    }

    Ok(())
}

/// Handle file system events for real-time updates.
///
/// Forwards Create/Modify/Remove events inside the watched subtree to the
/// active scan's channel, or applies them directly when no scan is running.
pub async fn handle_fs_event(event: notify::Event) {
    // Nothing to do unless a directory is being watched.
    let watch_root = match FILE_SYSTEM.watch_path.read().clone() {
        Some(root) => root,
        None => return,
    };

    for path in event.paths {
        // Skip events outside the watched subtree.
        if !path.starts_with(&watch_root) {
            continue;
        }

        // Map the notify kind onto our internal kinds; anything else
        // (access, metadata-only, ...) is ignored.
        let kind = match event.kind {
            notify::EventKind::Create(_) => FsEventKind::Created,
            notify::EventKind::Modify(_) => FsEventKind::Modified,
            notify::EventKind::Remove(_) => FsEventKind::Deleted,
            _ => continue,
        };

        if let Some(sender) = FILE_SYSTEM.event_sender.lock().as_ref() {
            let scan_event = ScanEvent::FsEvent {
                path: path.clone(),
                kind: kind.clone(),
            };
            // A closed channel means the scan is gone — stop forwarding.
            if sender.send(scan_event).is_err() {
                break;
            }
        } else {
            // No active scan, process immediately.
            let mut tree_builder = TreeBuilder::new();
            if let Err(e) = tree_builder.handle_fs_event(path, kind) {
                eprintln!("Error handling fs event: {}", e);
            }
        }
    }
}

/// Get a serialized node by exact path (O(1) index lookup).
#[tauri::command]
pub fn get_node_by_path(path: String) -> Option<FileNodeForJs> {
    let key = PathBuf::from(path);
    let entry = FILE_SYSTEM.path_index.get(&key)?;
    Some(FileNodeForJs::from(entry.as_ref()))
}

/// Return the directory currently being watched, if any.
pub fn get_current_watch_path() -> Option<PathBuf> {
    let guard = FILE_SYSTEM.watch_path.read();
    guard.as_ref().cloned()
}

/// Get scan statistics.
///
/// Aggregates come straight from the root node's atomic counters; when no
/// scan has produced a root yet, zeros are reported.
#[tauri::command]
pub fn get_scan_stats() -> ScanStats {
    let (total_size, file_count, dir_count) = match FILE_SYSTEM.root.read().as_ref() {
        Some(root) => {
            let (size, files, dirs, _modified) = root.get_stats();
            (size, files, dirs)
        }
        None => (0, 0, 0),
    };

    ScanStats {
        total_nodes: FILE_SYSTEM.path_index.len(),
        total_size,
        file_count,
        dir_count,
        is_scanning: FILE_SYSTEM.scanning.load(Ordering::Relaxed),
    }
}

/// Get performance statistics for the current/last scan, if a monitor exists.
#[tauri::command]
pub fn get_performance_stats() -> Option<PerformanceStats> {
    let guard = FILE_SYSTEM.performance_monitor.read();
    let monitor = guard.as_ref()?;
    Some(monitor.get_detailed_stats())
}

/// Aggregate scan statistics reported to the frontend.
#[derive(Debug, Serialize)]
pub struct ScanStats {
    /// Number of entries currently in the global path index.
    pub total_nodes: usize,
    /// Total size in bytes of the scanned tree (root aggregate).
    pub total_size: u64,
    /// Number of files in the scanned tree.
    pub file_count: u64,
    /// Number of directories in the scanned tree.
    pub dir_count: u64,
    /// Whether a scan is currently in progress.
    pub is_scanning: bool,
}

/// Enhanced performance monitoring with detailed metrics.
///
/// All counters are atomics so producers can update them lock-free; only
/// the rate-throttling timestamp needs a mutex.
#[derive(Debug)]
pub struct PerformanceMonitor {
    /// When the scan started; rates are averaged over this whole window.
    start_time: Instant,
    /// Total nodes (files + directories) seen so far.
    nodes_processed: AtomicU64,
    /// Total bytes of the files seen so far.
    bytes_processed: AtomicU64,
    /// Files seen so far.
    files_processed: AtomicU64,
    /// Directories seen so far.
    dirs_processed: AtomicU64,
    /// Timestamp of the last rate recomputation (throttles `update_rates`).
    last_update_time: parking_lot::Mutex<Instant>,
    nodes_per_sec: AtomicU64, // Store as integer (nodes * 100 for 2 decimal places)
    /// Average bytes per second since `start_time`.
    bytes_per_sec: AtomicU64,
    /// Estimated seconds remaining, derived from `estimated_total_nodes`.
    eta_seconds: AtomicU64,
    /// Externally supplied node-count estimate used for ETA/progress.
    estimated_total_nodes: AtomicU64,
}

impl PerformanceMonitor {
    /// Create a monitor with all counters zeroed and timing anchored at now.
    pub fn new() -> Self {
        let now = Instant::now();
        Self {
            start_time: now,
            nodes_processed: AtomicU64::new(0),
            bytes_processed: AtomicU64::new(0),
            files_processed: AtomicU64::new(0),
            dirs_processed: AtomicU64::new(0),
            last_update_time: parking_lot::Mutex::new(now),
            nodes_per_sec: AtomicU64::new(0),
            bytes_per_sec: AtomicU64::new(0),
            eta_seconds: AtomicU64::new(0),
            estimated_total_nodes: AtomicU64::new(0),
        }
    }

    /// Record one discovered node of `size` bytes; `is_file` selects which
    /// of the file/dir counters is bumped alongside the totals.
    pub fn increment_node(&self, size: u64, is_file: bool) {
        self.nodes_processed.fetch_add(1, Ordering::Relaxed);
        self.bytes_processed.fetch_add(size, Ordering::Relaxed);

        if is_file {
            self.files_processed.fetch_add(1, Ordering::Relaxed);
        } else {
            self.dirs_processed.fetch_add(1, Ordering::Relaxed);
        }
    }

    /// Recompute throughput and ETA, throttled to at most once per second.
    ///
    /// Note the rates are *cumulative averages* since `start_time`, not a
    /// sliding window. `nodes_per_sec` is stored scaled by 100 so two
    /// decimal places survive the integer atomic.
    pub fn update_rates(&self) {
        let now = Instant::now();
        let mut last_update = self.last_update_time.lock();
        let time_diff = now.duration_since(*last_update).as_secs_f64();

        if time_diff >= 1.0 {
            // Update rates every second
            let elapsed = self.start_time.elapsed().as_secs_f64();
            let nodes = self.nodes_processed.load(Ordering::Relaxed);
            let bytes = self.bytes_processed.load(Ordering::Relaxed);

            if elapsed > 0.0 {
                let nodes_per_sec = (nodes as f64 / elapsed * 100.0) as u64; // Store as integer * 100
                let bytes_per_sec = (bytes as f64 / elapsed) as u64;

                self.nodes_per_sec.store(nodes_per_sec, Ordering::Relaxed);
                self.bytes_per_sec.store(bytes_per_sec, Ordering::Relaxed);

                // ETA = remaining nodes / average rate; only meaningful once
                // an estimate has been supplied and some progress was made.
                let estimated_total = self.estimated_total_nodes.load(Ordering::Relaxed);
                if estimated_total > nodes && nodes_per_sec > 0 {
                    let remaining_nodes = estimated_total - nodes;
                    // Un-scale nodes_per_sec (÷100) back to real nodes/sec.
                    let eta = (remaining_nodes as f64 / (nodes_per_sec as f64 / 100.0)) as u64;
                    self.eta_seconds.store(eta, Ordering::Relaxed);
                }
            }

            *last_update = now;
        }
    }

    /// Supply an expected total node count used for ETA and progress percent.
    pub fn set_estimated_total(&self, total: u64) {
        self.estimated_total_nodes.store(total, Ordering::Relaxed);
    }

    /// Snapshot every counter into a serializable stats struct.
    ///
    /// `progress_percent` is clamped to 100 and reads 0 until an estimated
    /// total has been set.
    pub fn get_detailed_stats(&self) -> PerformanceStats {
        let elapsed = self.start_time.elapsed();
        let nodes = self.nodes_processed.load(Ordering::Relaxed);
        let bytes = self.bytes_processed.load(Ordering::Relaxed);
        let files = self.files_processed.load(Ordering::Relaxed);
        let dirs = self.dirs_processed.load(Ordering::Relaxed);
        // Convert the ×100 fixed-point representation back to a float.
        let nodes_per_sec = self.nodes_per_sec.load(Ordering::Relaxed) as f64 / 100.0;
        let bytes_per_sec = self.bytes_per_sec.load(Ordering::Relaxed);
        let eta = self.eta_seconds.load(Ordering::Relaxed);
        let estimated_total = self.estimated_total_nodes.load(Ordering::Relaxed);

        let progress_percent = if estimated_total > 0 {
            (nodes as f64 / estimated_total as f64 * 100.0).min(100.0)
        } else {
            0.0
        };

        PerformanceStats {
            elapsed_seconds: elapsed.as_secs(),
            elapsed_ms: elapsed.as_millis() as u64,
            nodes_processed: nodes,
            bytes_processed: bytes,
            files_processed: files,
            dirs_processed: dirs,
            nodes_per_second: nodes_per_sec,
            bytes_per_second: bytes_per_sec,
            eta_seconds: eta,
            estimated_total_nodes: estimated_total,
            progress_percent,
        }
    }
}

/// Serializable snapshot of [`PerformanceMonitor`] counters for the frontend.
#[derive(Debug, Serialize)]
pub struct PerformanceStats {
    /// Whole seconds since the scan started.
    pub elapsed_seconds: u64,
    /// Milliseconds since the scan started.
    pub elapsed_ms: u64,
    /// Total nodes (files + directories) processed so far.
    pub nodes_processed: u64,
    /// Total bytes processed so far.
    pub bytes_processed: u64,
    /// Files processed so far.
    pub files_processed: u64,
    /// Directories processed so far.
    pub dirs_processed: u64,
    /// Average nodes/sec since start (two decimal places of precision).
    pub nodes_per_second: f64,
    /// Average bytes/sec since start.
    pub bytes_per_second: u64,
    /// Estimated seconds remaining; 0 until an estimate is available.
    pub eta_seconds: u64,
    /// Externally supplied node-count estimate (0 if never set).
    pub estimated_total_nodes: u64,
    /// Progress 0–100, based on `estimated_total_nodes`; 0 when no estimate.
    pub progress_percent: f64,
}

/// Global WebSocket broadcast sender for real-time updates.
///
/// `None` until [`set_websocket_sender`] installs a sender; notification
/// helpers silently skip sending while it is unset.
static WEBSOCKET_SENDER: once_cell::sync::Lazy<
    parking_lot::Mutex<Option<broadcast::Sender<String>>>,
> = once_cell::sync::Lazy::new(|| parking_lot::Mutex::new(None));

/// Install the broadcast sender used to push scan updates to WebSocket clients.
pub fn set_websocket_sender(sender: broadcast::Sender<String>) {
    let mut slot = WEBSOCKET_SENDER.lock();
    *slot = Some(sender);
}

/// Notify WebSocket clients about scan progress with performance metrics
async fn notify_scan_progress() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Check if scanning is still active before sending progress
    if !FILE_SYSTEM.scanning.load(Ordering::Relaxed) {
        return Ok(());
    }

    let stats = get_scan_stats();
    let perf_stats = get_performance_stats();

    let message = serde_json::json!({
        "type": "scan_progress",
        "data": {
            "total_nodes": stats.total_nodes,
            "total_size": stats.total_size,
            "file_count": stats.file_count,
            "dir_count": stats.dir_count,
            "is_scanning": stats.is_scanning,
            "performance": perf_stats
        }
    })
    .to_string();

    if let Some(sender) = WEBSOCKET_SENDER.lock().as_ref() {
        let _ = sender.send(message);
    }

    Ok(())
}

/// Notify WebSocket clients about scan completion with final performance metrics
async fn notify_scan_complete() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let stats = get_scan_stats();
    let perf_stats = get_performance_stats();

    let message = serde_json::json!({
        "type": "scan_complete",
        "data": {
            "total_nodes": stats.total_nodes,
            "total_size": stats.total_size,
            "file_count": stats.file_count,
            "dir_count": stats.dir_count,
            "is_scanning": false, // Explicitly set to false for completion
            "performance": perf_stats
        }
    })
    .to_string();

    if let Some(sender) = WEBSOCKET_SENDER.lock().as_ref() {
        let _ = sender.send(message);
    }

    Ok(())
}

/// Recalculate and fix size aggregations for all nodes.
///
/// Two-pass bottom-up repair over the whole index:
/// 1. reset every node's aggregates to its own contribution,
/// 2. re-sum each directory from its children, deepest directories first,
///    so parents always read already-corrected child totals.
///
/// Returns a summary of how many directory nodes actually changed.
#[tauri::command]
pub fn fix_size_aggregations() -> Result<String, String> {
    let total_nodes = FILE_SYSTEM.path_index.len();

    // First pass: reset all aggregated values to the node's own contribution.
    for entry in FILE_SYSTEM.path_index.iter() {
        let node = entry.value();
        if node.is_dir {
            // Directories have no size of their own and count themselves
            // as one directory.
            node.size_total.store(0, Ordering::Relaxed);
            node.file_count.store(0, Ordering::Relaxed);
            node.dir_count.store(1, Ordering::Relaxed);
        } else {
            node.size_total.store(node.file_size, Ordering::Relaxed);
            node.file_count.store(1, Ordering::Relaxed);
            node.dir_count.store(0, Ordering::Relaxed);
        }
    }

    // Second pass: group *directories* by path depth so aggregation can run
    // bottom-up. Files are skipped here — pass 1 already finalized them, and
    // the old code needlessly carried them through this phase as no-ops.
    let mut dirs_by_depth: std::collections::BTreeMap<usize, Vec<Arc<FileNode>>> =
        std::collections::BTreeMap::new();
    for entry in FILE_SYSTEM.path_index.iter() {
        let node = entry.value();
        if node.is_dir {
            let depth = node.path.components().count();
            dirs_by_depth.entry(depth).or_default().push(Arc::clone(node));
        }
    }

    // Deepest directories first: children are final before any parent reads.
    let mut fixed_count = 0;
    for dirs in dirs_by_depth.values().rev() {
        for dir in dirs {
            let mut total_size = 0u64;
            let mut total_files = 0u64;
            let mut total_dirs = 1u64; // the directory itself

            for child in dir.children.read().iter() {
                let (child_size, child_files, child_dirs, _) = child.get_stats();
                total_size += child_size;
                total_files += child_files;
                total_dirs += child_dirs;
            }

            // Swap in the recomputed values and note whether anything moved.
            let old_size = dir.size_total.swap(total_size, Ordering::Relaxed);
            let old_files = dir.file_count.swap(total_files, Ordering::Relaxed);
            let old_dirs = dir.dir_count.swap(total_dirs, Ordering::Relaxed);
            if (old_size, old_files, old_dirs) != (total_size, total_files, total_dirs) {
                fixed_count += 1;
            }
        }
    }

    Ok(format!(
        "Fixed {} out of {} nodes",
        fixed_count, total_nodes
    ))
}
