// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::time::SystemTime;
use sha2::{Sha256, Digest};
use image::imageops::FilterType;
use std::io::Read;
use walkdir::WalkDir;
use std::time::Duration;
use std::time::UNIX_EPOCH;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;
use lru::LruCache;
use std::num::NonZeroUsize;
use base64::{Engine as _, engine::general_purpose};
use image::DynamicImage;

// Global cancellation flag shared by all scan commands: raised by
// `cancel_scan`, cleared at the start of each scan, polled by the loops.
static CANCEL_SCAN: AtomicBool = AtomicBool::new(false);

// Clear the cancellation flag before starting a new scan.
fn reset_cancel_flag() {
    CANCEL_SCAN.store(false, Ordering::SeqCst);
}

// True when cancellation has been requested for the scan in flight.
fn should_cancel() -> bool {
    CANCEL_SCAN.load(Ordering::SeqCst)
}

// Raise the cancellation flag; scanning loops abort at their next poll.
fn set_cancel_flag() {
    CANCEL_SCAN.store(true, Ordering::SeqCst);
}

// One persisted cache record: identifies a file by path/size/mtime and
// stores the hash computed for it.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct FileCacheEntry {
    file_path: String,     // path (or "partial_"-prefixed key) the hash belongs to
    file_size: u64,        // size in bytes at hashing time
    modified_time: u64,    // mtime, seconds since the Unix epoch
    file_hash: String,     // cached hex digest (full or partial SHA-256)
    cache_timestamp: u64,  // when this entry was written (Unix seconds)
}

// In-memory view of the on-disk hash cache (`<cache_dir>/file_cache.json`).
#[derive(Debug, Serialize, Deserialize)]
struct FileCache {
    entries: HashMap<String, FileCacheEntry>,  // keyed by SHA-256 of path+size+mtime
    cache_dir: String,                         // directory holding file_cache.json
}

impl FileCache {
    /// Load the persisted cache from `<cache_dir>/file_cache.json`.
    ///
    /// BUGFIX: the cache directory is now created up front — `fs::write` in
    /// `save()` does not create parent directories, so on a fresh install
    /// the cache silently never persisted. A missing or corrupt cache file
    /// simply yields an empty cache.
    fn new(cache_dir: &str) -> Self {
        // Best effort: an unwritable location degrades to an in-memory cache.
        let _ = fs::create_dir_all(cache_dir);

        let cache_file = format!("{}/file_cache.json", cache_dir);
        let mut cache = FileCache {
            entries: HashMap::new(),
            cache_dir: cache_dir.to_string(),
        };

        // Best-effort load of an existing cache; unreadable content is ignored.
        if let Ok(content) = fs::read_to_string(&cache_file) {
            if let Ok(entries) = serde_json::from_str::<HashMap<String, FileCacheEntry>>(&content) {
                cache.entries = entries;
            }
        }

        cache
    }

    /// Cache key = SHA-256 over path + size + mtime, so any change to the
    /// file produces a different key and stale entries are never returned.
    fn get_cache_key(&self, file_path: &str, file_size: u64, modified_time: u64) -> String {
        let mut hasher = Sha256::new();
        hasher.update(file_path.as_bytes());
        hasher.update(file_size.to_le_bytes());
        hasher.update(modified_time.to_le_bytes());
        format!("{:x}", hasher.finalize())
    }

    /// Return the cached hash for this (path, size, mtime) triple, or `None`
    /// on a miss. The stored fields are re-checked to guard against key
    /// collisions.
    fn get(&self, file_path: &str, file_size: u64, modified_time: u64) -> Option<String> {
        let key = self.get_cache_key(file_path, file_size, modified_time);
        if let Some(entry) = self.entries.get(&key) {
            if entry.file_path == file_path && 
               entry.file_size == file_size && 
               entry.modified_time == modified_time {
                return Some(entry.file_hash.clone());
            }
        }
        None
    }

    /// Insert (or replace) an entry and persist the whole cache to disk.
    /// NOTE: persisting on every insert is O(cache size) per call — fine for
    /// moderate scans, a candidate for batching if scans grow large.
    fn set(&mut self, file_path: String, file_size: u64, modified_time: u64, file_hash: String) {
        let key = self.get_cache_key(&file_path, file_size, modified_time);
        let entry = FileCacheEntry {
            file_path,
            file_size,
            modified_time,
            file_hash,
            cache_timestamp: SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
        };

        self.entries.insert(key, entry);
        self.save();
    }

    /// Serialize all entries to `<cache_dir>/file_cache.json`, best effort
    /// (write errors are deliberately ignored — the cache is an optimization).
    fn save(&self) {
        let cache_file = format!("{}/file_cache.json", self.cache_dir);
        if let Ok(content) = serde_json::to_string_pretty(&self.entries) {
            let _ = fs::write(cache_file, content);
        }
    }
}

// Outcome of the folder-picker dialog, serialized back to the frontend.
#[derive(Debug, Serialize, Deserialize)]
struct SelectDirectoryResult {
    success: bool,          // true when the user picked a folder
    path: Option<String>,   // chosen directory (None on cancel)
    error: Option<String>,  // human-readable reason when success is false
}

// Metadata for one scanned file. The optional fields are placeholders the
// detection strategies may fill in; as written they stay None.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct FileInfo {
    name: String,                       // file name without directory
    path: String,                       // full path as displayed
    size: u64,                          // size in bytes
    modified: u64,                      // mtime, seconds since the Unix epoch
    hash: Option<String>,               // full-content hash, when computed
    image_hash: Option<String>,         // perceptual image hash, when computed
    audio_fingerprint: Option<String>,  // audio fingerprint, when computed
}

// A set of files judged identical or similar to one another.
#[derive(Debug, Serialize, Deserialize)]
struct DuplicateGroup {
    files: Vec<FileInfo>,     // all members of the group
    size: u64,                // group size in bytes (semantics set by each finder)
    similarity_type: String,  // user-facing label, e.g. "完全一致"
}

// Aggregate scan outcome returned to the frontend.
#[derive(Debug, Serialize, Deserialize)]
struct ScanResult {
    total_files: u32,             // number of files that passed the filters
    duplicate_groups: u32,        // number of duplicate/similar groups found
    duplicate_files: u32,         // total files across all groups
    saved_space: String,          // human-readable reclaimable space (keep one per group)
    groups: Vec<DuplicateGroup>,  // the groups themselves
}

// Scan parameters sent from the frontend.
#[derive(Deserialize)]
struct ScanRequest {
    path: String,                  // root directory to scan
    scan_mode: String,             // category filter: "image"/"audio"/"video"/"document"/other
    detection_mode: String,        // "exact" or "similar"
    similarity_threshold: u32,     // percentage used to label similar groups
    min_file_size: u64,            // skip files smaller than this (bytes)
    max_file_size: u64,            // skip files larger than this (bytes; 0 = no limit)
    file_extensions: Vec<String>,  // extension whitelist (empty = all extensions)
}

// One progress event emitted to the UI on the "scan_progress" channel.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct ProgressUpdate {
    stage: String,    // machine-readable stage id, e.g. "calculating_hashes"
    current: u32,     // units completed within the stage
    total: u32,       // units in the stage (0 when not yet known)
    message: String,  // human-readable status line
    percentage: u8,   // overall progress, 0-100
}

// Final payload of `scan_directory_with_progress`.
#[derive(Debug, Serialize, Deserialize)]
struct ScanProgressResult {
    success: bool,               // whether the scan ran to completion
    progress: ProgressUpdate,    // final progress state ("completed")
    completed: bool,             // mirrors success for the frontend
    result: Option<ScanResult>,  // the scan results when completed
}

/// Health-check command the frontend can invoke to verify the Tauri bridge.
#[tauri::command]
fn test_command() -> String {
    String::from("Tauri API 工作正常!")
}

/// Open the native folder picker and report the chosen path, or the fact
/// that the user dismissed the dialog.
#[tauri::command]
fn select_directory() -> SelectDirectoryResult {
    let picked = rfd::FileDialog::new().set_directory("/").pick_folder();

    if let Some(folder) = picked {
        SelectDirectoryResult {
            success: true,
            path: Some(folder.display().to_string()),
            error: None,
        }
    } else {
        SelectDirectoryResult {
            success: false,
            path: None,
            error: Some("用户取消了选择".to_string()),
        }
    }
}

#[tauri::command]
async fn scan_directory(request: ScanRequest) -> Result<ScanResult, String> {
    let path = Path::new(&request.path);
    if !path.exists() || !path.is_dir() {
        return Err("路径不存在或不是目录".to_string());
    }

    println!("开始扫描目录: {}", path.display());
    println!("检测模式: {}, 扫描模式: {}", request.detection_mode, request.scan_mode);

    let mut files = Vec::new();
    let mut total_size = 0u64;

    // 递归扫描目录
    scan_directory_recursive(path, &mut files, &mut total_size, &request)?;
    
    println!("扫描完成，共发现 {} 个文件，总大小: {}", files.len(), format_file_size(total_size));

    // 根据检测模式进行去重
    let groups = match request.detection_mode.as_str() {
        "exact" => find_exact_duplicates(&files, None)?,
        "similar" => find_similar_files(&files, request.similarity_threshold, &request.scan_mode),
        _ => return Err("不支持的检测模式".to_string()),
    };

    // 计算统计信息
    let duplicate_files: u32 = groups.iter().map(|g| g.files.len() as u32).sum();
    let total_duplicate_size: u64 = groups.iter().map(|g| g.size).sum();
    let saved_space = if total_duplicate_size > 0 {
        let keep_size: u64 = groups.iter().map(|g| {
            if let Some(first_file) = g.files.first() {
                first_file.size
            } else {
                0
            }
        }).sum();
        total_duplicate_size - keep_size
    } else {
        0
    };
    let saved_space_str = format_file_size(saved_space);

    println!("去重分析完成，发现 {} 个重复组，{} 个重复文件", groups.len(), duplicate_files);
    println!("重复文件总大小: {}, 可节省空间: {}", format_file_size(total_duplicate_size), saved_space_str);

    Ok(ScanResult {
        total_files: files.len() as u32,
        duplicate_groups: groups.len() as u32,
        duplicate_files,
        saved_space: saved_space_str,
        groups,
    })
}

/// Scan `request.path` while streaming `scan_progress` events to `window`.
///
/// Progress budget: file discovery 0-10%, hashing 10-90%, analysis 90-100%.
/// Honors the global cancel flag between files and returns an error string
/// if the path is invalid, the detection mode is unknown, or the user
/// cancels mid-scan.
#[tauri::command]
async fn scan_directory_with_progress(request: ScanRequest, window: tauri::Window) -> Result<ScanProgressResult, String> {
    // Clear any cancellation left over from a previous scan.
    reset_cancel_flag();
    
    let path = Path::new(&request.path);
    if !path.exists() || !path.is_dir() {
        return Err("路径不存在或不是目录".to_string());
    }

    println!("开始实时进度扫描目录: {}", path.display());
    println!("检测模式: {}, 扫描模式: {}", request.detection_mode, request.scan_mode);

    let start_time = std::time::Instant::now();
    let mut stage_times: HashMap<String, std::time::Duration> = HashMap::new();

    // Stage 1: file discovery.
    let stage_start = std::time::Instant::now();
    let _ = window.emit("scan_progress", ProgressUpdate {
        stage: "scanning_files".to_string(),
        current: 0,
        total: 0,
        message: "正在扫描目录文件...".to_string(),
        percentage: 0,
    });

    // Collect candidate files.
    let mut files = Vec::new();
    let mut total_size = 0u64;
    let mut scanned_count = 0u32;
    
    for entry in WalkDir::new(&path)
        .follow_links(false)
        .into_iter()
        .filter_map(|e| e.ok())
        .filter(|e| e.file_type().is_file())
    {
        let file_path = entry.path();
        
        // Category filter derived from the scan mode (image/audio/...).
        if !should_include_file(file_path.to_str().unwrap_or(""), &request.scan_mode) {
            continue;
        }
        
        if let Ok(metadata) = file_path.metadata() {
            let file_size = metadata.len();
            
            // Size limits (a max of 0 means "no upper bound").
            if file_size < request.min_file_size || 
               (request.max_file_size > 0 && file_size > request.max_file_size) {
                continue;
            }

            // Extension whitelist: only applied when the list is non-empty;
            // comparison is case-insensitive. An empty list means all types.
            if !request.file_extensions.is_empty() {
                let file_ext = file_path.extension().and_then(|e| e.to_str()).unwrap_or("");
                let file_ext_lower = file_ext.to_lowercase();
                let is_allowed = request.file_extensions.iter().any(|ext| file_ext_lower == ext.to_lowercase());
                
                if !is_allowed {
                    println!("跳过文件 (扩展名不匹配): {} (扩展名: '{}', 允许的扩展名: {:?})", 
                             file_path.display(), file_ext, request.file_extensions);
                    continue;
                } else {
                    println!("包含文件 (扩展名匹配): {} (扩展名: '{}')", 
                             file_path.display(), file_ext);
                }
            } else {
                println!("包含文件 (无扩展名限制): {} (扩展名: '{}')", 
                         file_path.display(), 
                         file_path.extension().and_then(|e| e.to_str()).unwrap_or("无扩展名"));
            }
            
            let file_info = FileInfo {
                name: file_path.file_name().unwrap_or_default().to_string_lossy().to_string(),
                path: file_path.display().to_string(),
                size: file_size,
                modified: metadata.modified()
                    .unwrap_or(SystemTime::now())
                    .duration_since(UNIX_EPOCH)
                    .unwrap_or(Duration::from_secs(0))
                    .as_secs(),
                hash: None,
                image_hash: None,
                audio_fingerprint: None,
            };
            
            files.push(file_info);
            total_size += file_size;
        }
        
        scanned_count += 1;
        
        // Emit a progress event every 100 directory entries; this stage maps
        // to the 0-10% range of the overall bar.
        if scanned_count % 100 == 0 {
            // BUGFIX: this used to divide by `files.len()` unconditionally,
            // which panics with a divide-by-zero when every entry seen so
            // far was filtered out (files is still empty).
            let discovered = files.len() as u32;
            let progress_percentage = if discovered > 0 {
                ((scanned_count * 10) / discovered).min(10) as u8
            } else {
                0
            };
            let progress = ProgressUpdate {
                stage: "scanning_files".to_string(),
                current: scanned_count,
                total: discovered,
                message: format!("正在扫描文件... 已发现 {} 个文件", files.len()),
                percentage: progress_percentage,
            };
            if let Err(e) = window.emit("scan_progress", progress) {
                eprintln!("发送进度更新失败: {}", e);
            }
        }

        // Abort promptly if the user cancelled.
        if should_cancel() {
            println!("收到取消扫描请求，停止扫描。");
            return Err("用户取消了扫描".to_string());
        }
    }
    
    let total_files = files.len() as u32;
    let scan_duration = stage_start.elapsed();
    stage_times.insert("scanning_files".to_string(), scan_duration);
    
    println!("文件扫描完成，共发现 {} 个文件，总大小: {}，耗时: {:.2?}", 
             total_files, format_file_size(total_size), scan_duration);

    // Stage 1 finished.
    let _ = window.emit("scan_progress", ProgressUpdate {
        stage: "files_scanned".to_string(),
        current: total_files,
        total: total_files,
        message: format!("文件扫描完成！发现 {} 个文件，正在分析重复文件...", total_files),
        percentage: 10,
    });

    // Stage 2: duplicate analysis.
    println!("第二阶段：分析重复文件");
    let analysis_start = std::time::Instant::now();
    let _ = window.emit("scan_progress", ProgressUpdate {
        stage: "analyzing_duplicates".to_string(),
        current: 0,
        total: total_files,
        message: "正在分析重复文件...".to_string(),
        percentage: 10,
    });

    let groups = match request.detection_mode.as_str() {
        "exact" => {
            println!("使用精确匹配模式查找重复文件...");
            
            // Hashing sub-stage start.
            let hash_start = std::time::Instant::now();
            let _ = window.emit("scan_progress", ProgressUpdate {
                stage: "calculating_hashes".to_string(),
                current: 0,
                total: total_files,
                message: "正在计算文件哈希值，这可能需要一些时间...".to_string(),
                percentage: 10,
            });
            
            let result = find_exact_duplicates(&files, Some(&window))?;
            
            let hash_duration = hash_start.elapsed();
            stage_times.insert("calculating_hashes".to_string(), hash_duration);
            println!("哈希计算完成，耗时: {:.2?}", hash_duration);
            
            // Hashing sub-stage finished.
            let _ = window.emit("scan_progress", ProgressUpdate {
                stage: "hashes_calculated".to_string(),
                current: total_files,
                total: total_files,
                message: "文件哈希值计算完成，正在分析重复文件...".to_string(),
                percentage: 90,
            });
            
            result
        },
        "similar" => {
            println!("使用相似性匹配模式查找重复文件...");
            
            // Hashing sub-stage start.
            let hash_start = std::time::Instant::now();
            let _ = window.emit("scan_progress", ProgressUpdate {
                stage: "calculating_hashes".to_string(),
                current: 0,
                total: total_files,
                message: "正在计算文件哈希值，这可能需要一些时间...".to_string(),
                percentage: 10,
            });
            
            let result = find_similar_files(&files, request.similarity_threshold, &request.scan_mode);
            
            let hash_duration = hash_start.elapsed();
            stage_times.insert("calculating_hashes".to_string(), hash_duration);
            println!("哈希计算完成，耗时: {:.2?}", hash_duration);
            
            // Hashing sub-stage finished.
            let _ = window.emit("scan_progress", ProgressUpdate {
                stage: "hashes_calculated".to_string(),
                current: total_files,
                total: total_files,
                message: "文件哈希值计算完成，正在分析重复文件...".to_string(),
                percentage: 90,
            });
            
            result
        },
        _ => return Err("不支持的检测模式".to_string()),
    };

    let analysis_duration = analysis_start.elapsed();
    stage_times.insert("analyzing_duplicates".to_string(), analysis_duration);

    // Stage 2 finished.
    let _ = window.emit("scan_progress", ProgressUpdate {
        stage: "duplicates_analyzed".to_string(),
        current: total_files,
        total: total_files,
        message: format!("重复文件分析完成！发现 {} 个重复组", groups.len()),
        percentage: 100,
    });

    // Statistics: keeping one file per group, the remainder is savings.
    let duplicate_files: u32 = groups.iter().map(|g| g.files.len() as u32).sum();
    let total_duplicate_size: u64 = groups.iter().map(|g| g.size).sum();
    let saved_space = if total_duplicate_size > 0 {
        let keep_size: u64 = groups.iter().map(|g| {
            if let Some(first_file) = g.files.first() {
                first_file.size
            } else {
                0
            }
        }).sum();
        total_duplicate_size - keep_size
    } else {
        0
    };
    let saved_space_str = format_file_size(saved_space);

    println!("去重分析完成，发现 {} 个重复组，{} 个重复文件", groups.len(), duplicate_files);
    println!("重复文件总大小: {}, 可节省空间: {}", format_file_size(total_duplicate_size), saved_space_str);

    // Final completion event.
    let _ = window.emit("scan_progress", ProgressUpdate {
        stage: "completed".to_string(),
        current: total_files,
        total: total_files,
        message: format!("扫描完成！发现 {} 个文件，{} 个重复组，可节省空间 {}", total_files, groups.len(), saved_space_str),
        percentage: 100,
    });

    let total_duration = start_time.elapsed();
    stage_times.insert("total".to_string(), total_duration);

    // Per-stage timing report (stdout only, for diagnostics).
    println!("\n=== 各阶段时间统计 ===");
    println!("总耗时: {:.2?}", total_duration);
    for (stage, duration) in &stage_times {
        if *stage != "total" {
            let percentage = (duration.as_millis() as f64 / total_duration.as_millis() as f64) * 100.0;
            println!("{}: {:.2?} ({:.1}%)", stage, duration, percentage);
        }
    }
    println!("=====================\n");

    let result = ScanResult {
        total_files,
        duplicate_groups: groups.len() as u32,
        duplicate_files,
        saved_space: saved_space_str,
        groups,
    };

    Ok(ScanProgressResult {
        success: true,
        progress: ProgressUpdate {
            stage: "completed".to_string(),
            current: total_files,
            total: total_files,
            message: "扫描完成！".to_string(),
            percentage: 100,
        },
        completed: true,
        result: Some(result),
    })
}

/// Tauri command: request cancellation of the scan currently in flight.
/// The scanning loops poll the global flag and abort at their next checkpoint.
#[tauri::command]
async fn cancel_scan() -> Result<(), String> {
    println!("收到取消扫描请求");
    set_cancel_flag();
    Ok(())
}

fn scan_directory_recursive(
    dir: &Path, 
    files: &mut Vec<FileInfo>, 
    total_size: &mut u64,
    request: &ScanRequest
) -> Result<(), String> {
    let mut processed_count = 0u32;
    let mut skipped_count = 0u32;
    
    println!("开始扫描目录: {}", dir.display());
    
    for entry in fs::read_dir(dir).map_err(|e| e.to_string())? {
        let entry = entry.map_err(|e| e.to_string())?;
        let path = entry.path();
        
        if path.is_file() {
            let metadata = fs::metadata(&path).map_err(|e| e.to_string())?;
            let size = metadata.len();
            
            // 检查文件大小限制
            if request.min_file_size > 0 && size < request.min_file_size {
                skipped_count += 1;
                continue;
            }
            if request.max_file_size > 0 && size > request.max_file_size {
                skipped_count += 1;
                continue;
            }

            // 跳过系统文件和临时文件
            if let Some(file_name) = path.file_name() {
                let file_name_str = file_name.to_string_lossy();
                if file_name_str.starts_with('.') || 
                   file_name_str.starts_with('~') ||
                   file_name_str.ends_with(".tmp") ||
                   file_name_str.ends_with(".temp") {
                    skipped_count += 1;
                    continue;
                }
            }

            let modified = metadata
                .modified()
                .unwrap_or(SystemTime::now())
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs();

            let file_info = FileInfo {
                name: path.file_name().unwrap_or_default().to_string_lossy().to_string(),
                path: path.display().to_string(),
                size,
                modified,
                hash: None,
                image_hash: None,
                audio_fingerprint: None,
            };

            files.push(file_info);
            *total_size += size;
            processed_count += 1;
            
            // 每处理1000个文件输出一次进度
            if processed_count % 1000 == 0 {
                println!("已处理 {} 个文件，跳过 {} 个文件，当前目录: {}", processed_count, skipped_count, dir.display());
            }
        } else if path.is_dir() {
            scan_directory_recursive(&path, files, total_size, request)?;
        }
    }
    
    println!("目录 {} 处理完成，共 {} 个文件，跳过 {} 个文件", dir.display(), processed_count, skipped_count);
    Ok(())
}

/// Decide whether `file_path` belongs to the file category selected by
/// `scan_mode` ("image" / "audio" / "video" / "document"); any other mode
/// accepts every file. Extension comparison is case-insensitive.
fn should_include_file(file_path: &str, scan_mode: &str) -> bool {
    // Allowed extensions per category (lower-case, without the dot).
    const IMAGE: &[&str] = &["jpg", "jpeg", "png", "gif", "bmp", "ico", "webp", "tiff", "svg", "raw"];
    const AUDIO: &[&str] = &["mp3", "wav", "flac", "aac", "ogg", "wma", "m4a", "opus", "aiff", "alac"];
    const VIDEO: &[&str] = &["mp4", "avi", "mkv", "mov", "wmv", "flv", "webm", "m4v", "3gp", "ts"];
    const DOCUMENT: &[&str] = &["pdf", "doc", "docx", "txt", "rtf", "odt", "pages", "md", "html", "xml"];

    let ext = std::path::Path::new(file_path)
        .extension()
        .and_then(|e| e.to_str())
        .map(|e| e.to_lowercase())
        .unwrap_or_default();

    let allowed: &[&str] = match scan_mode {
        "image" => IMAGE,
        "audio" => AUDIO,
        "video" => VIDEO,
        "document" => DOCUMENT,
        _ => return true, // generic mode includes all files
    };

    allowed.contains(&ext.as_str())
}

/// Group byte-identical files: bucket by size first, then confirm with a
/// full SHA-256 hash inside each bucket. Emits hashing progress events on
/// `window` when one is supplied; honors the global cancel flag between
/// buckets.
///
/// BUGFIX: a group's `size` is now the combined size of every member
/// (per-file size × count), matching the similar-mode finders. It used to
/// be a single file's size, which made the "saved space" statistic
/// (total − first file) collapse to zero for exact duplicates.
fn find_exact_duplicates(files: &[FileInfo], window: Option<&tauri::Window>) -> Result<Vec<DuplicateGroup>, String> {
    // Bucket by size: only files of equal size can be identical.
    let mut size_groups: HashMap<u64, Vec<&FileInfo>> = HashMap::new();
    for file in files {
        size_groups.entry(file.size).or_insert_with(Vec::new).push(file);
    }

    let mut groups = Vec::new();
    let total_size_groups = size_groups.len();

    for (group_index, (file_size, size_files)) in size_groups.into_iter().enumerate() {
        // Honor a cancellation request between buckets.
        if should_cancel() {
            println!("收到取消扫描请求，停止哈希计算。");
            return Err("用户取消了扫描".to_string());
        }

        // A bucket with a single file cannot contain duplicates.
        if size_files.len() < 2 {
            continue;
        }

        // Progress ramps from 10% to 90% across the size buckets
        // (discovery owns 0-10%, final analysis 90-100%).
        if let Some(window) = window {
            let progress_percentage = 10 + ((group_index * 80) / total_size_groups).min(80) as u8;
            let _ = window.emit("scan_progress", ProgressUpdate {
                stage: "calculating_hashes".to_string(),
                current: (group_index + 1) as u32,
                total: total_size_groups as u32,
                message: format!("正在计算文件哈希值... 处理第 {} 组，共 {} 组", group_index + 1, total_size_groups),
                percentage: progress_percentage,
            });
        }

        // Within a size bucket, files with the same SHA-256 are duplicates.
        let mut hash_groups: HashMap<String, Vec<&FileInfo>> = HashMap::new();
        for file in size_files {
            let hash = calculate_file_hash(&file.path).unwrap_or_default();
            hash_groups.entry(hash).or_insert_with(Vec::new).push(file);
        }

        for (_, hash_files) in hash_groups {
            if hash_files.len() >= 2 {
                let group_files: Vec<FileInfo> = hash_files.iter().map(|f| (*f).clone()).collect();
                groups.push(DuplicateGroup {
                    // Combined size of all duplicates in the group.
                    size: file_size * group_files.len() as u64,
                    files: group_files,
                    similarity_type: "完全一致".to_string(),
                });
            }
        }
    }

    Ok(groups)
}

/// Dispatch similarity detection to the strategy matching `scan_mode`;
/// unknown modes fall back to the generic partial-hash strategy.
///
/// BUGFIX: audio scans use the mode string "audio" elsewhere in this file
/// (see `should_include_file`), but this dispatcher only recognized
/// "music", silently routing audio scans to the generic path. Both
/// spellings are accepted now.
fn find_similar_files(
    files: &[FileInfo], 
    threshold: u32, 
    scan_mode: &str
) -> Vec<DuplicateGroup> {
    match scan_mode {
        "image" => find_similar_images(files, threshold),
        "video" => find_similar_videos(files, threshold),
        "audio" | "music" => find_similar_audio(files, threshold),
        "document" => find_similar_documents(files, threshold),
        _ => find_similar_general(files, threshold),
    }
}

/// Group images whose average-hash strings are identical. `threshold` only
/// appears in the group label; matching itself is exact hash equality.
fn find_similar_images(files: &[FileInfo], threshold: u32) -> Vec<DuplicateGroup> {
    const IMAGE_EXTS: [&str; 7] = ["jpg", "jpeg", "png", "bmp", "gif", "tiff", "webp"];

    // Bucket candidate images by their perceptual hash.
    let mut by_hash: HashMap<String, Vec<&FileInfo>> = HashMap::new();
    for file in files {
        let ext = file.path.split('.').last().unwrap_or("").to_lowercase();
        if !IMAGE_EXTS.contains(&ext.as_str()) {
            continue;
        }
        if let Some(image_hash) = calculate_image_hash(&file.path) {
            by_hash.entry(image_hash).or_insert_with(Vec::new).push(file);
        }
    }

    // Every bucket with two or more members becomes a similarity group.
    let mut groups = Vec::new();
    for members in by_hash.into_values() {
        if members.len() < 2 {
            continue;
        }
        let group_files: Vec<FileInfo> = members.into_iter().cloned().collect();
        let total_size: u64 = group_files.iter().map(|f| f.size).sum();
        groups.push(DuplicateGroup {
            files: group_files,
            size: total_size,
            similarity_type: format!("图像相似 ({}%)", threshold),
        });
    }

    groups
}

/// Group audio files by coarse size buckets (1 KB granularity): files of
/// near-identical size are treated as likely-similar. `threshold` only
/// labels the resulting groups.
fn find_similar_audio(files: &[FileInfo], threshold: u32) -> Vec<DuplicateGroup> {
    const AUDIO_EXTS: [&str; 6] = ["mp3", "wav", "flac", "aac", "ogg", "wma"];

    // Bucket audio files into 1 KB size bands.
    let mut by_bucket: HashMap<u64, Vec<&FileInfo>> = HashMap::new();
    for file in files {
        let ext = file.path.split('.').last().unwrap_or("").to_lowercase();
        if AUDIO_EXTS.contains(&ext.as_str()) {
            by_bucket.entry((file.size / 1024) * 1024).or_insert_with(Vec::new).push(file);
        }
    }

    // Every band with at least two members becomes a group.
    let mut groups = Vec::new();
    for members in by_bucket.into_values() {
        if members.len() >= 2 {
            let group_files: Vec<FileInfo> = members.into_iter().cloned().collect();
            let total_size: u64 = group_files.iter().map(|f| f.size).sum();
            groups.push(DuplicateGroup {
                files: group_files,
                size: total_size,
                similarity_type: format!("音频相似 ({}%)", threshold),
            });
        }
    }

    groups
}

/// Group video files by coarse size buckets (1 MB granularity). `threshold`
/// only labels the resulting groups.
fn find_similar_videos(files: &[FileInfo], threshold: u32) -> Vec<DuplicateGroup> {
    const VIDEO_EXTS: [&str; 7] = ["mp4", "avi", "mkv", "mov", "wmv", "flv", "webm"];
    const MB: u64 = 1024 * 1024;

    // Bucket video files into 1 MB size bands.
    let mut by_bucket: HashMap<u64, Vec<&FileInfo>> = HashMap::new();
    for file in files {
        let ext = file.path.split('.').last().unwrap_or("").to_lowercase();
        if VIDEO_EXTS.contains(&ext.as_str()) {
            by_bucket.entry((file.size / MB) * MB).or_insert_with(Vec::new).push(file);
        }
    }

    // Every band with at least two members becomes a group.
    let mut groups = Vec::new();
    for members in by_bucket.into_values() {
        if members.len() >= 2 {
            let group_files: Vec<FileInfo> = members.into_iter().cloned().collect();
            let total_size: u64 = group_files.iter().map(|f| f.size).sum();
            groups.push(DuplicateGroup {
                files: group_files,
                size: total_size,
                similarity_type: format!("视频相似 ({}%)", threshold),
            });
        }
    }

    groups
}

/// Group document files by coarse size buckets (1 KB granularity).
/// `threshold` only labels the resulting groups.
fn find_similar_documents(files: &[FileInfo], threshold: u32) -> Vec<DuplicateGroup> {
    const DOC_EXTS: [&str; 6] = ["pdf", "doc", "docx", "txt", "rtf", "odt"];

    // Bucket document files into 1 KB size bands.
    let mut by_bucket: HashMap<u64, Vec<&FileInfo>> = HashMap::new();
    for file in files {
        let ext = file.path.split('.').last().unwrap_or("").to_lowercase();
        if DOC_EXTS.contains(&ext.as_str()) {
            by_bucket.entry((file.size / 1024) * 1024).or_insert_with(Vec::new).push(file);
        }
    }

    // Every band with at least two members becomes a group.
    let mut groups = Vec::new();
    for members in by_bucket.into_values() {
        if members.len() >= 2 {
            let group_files: Vec<FileInfo> = members.into_iter().cloned().collect();
            let total_size: u64 = group_files.iter().map(|f| f.size).sum();
            groups.push(DuplicateGroup {
                files: group_files,
                size: total_size,
                similarity_type: format!("文档相似 ({}%)", threshold),
            });
        }
    }

    groups
}

/// Generic similarity pass: bucket all files into 1 MB size bands, then
/// compare a hash of each file's leading 4 KB within every band.
/// `threshold` only labels the resulting groups.
fn find_similar_general(files: &[FileInfo], threshold: u32) -> Vec<DuplicateGroup> {
    const MB: u64 = 1024 * 1024;

    // Bucket all files into 1 MB size bands.
    let mut by_bucket: HashMap<u64, Vec<&FileInfo>> = HashMap::new();
    for file in files {
        by_bucket.entry((file.size / MB) * MB).or_insert_with(Vec::new).push(file);
    }

    let mut groups = Vec::new();
    for members in by_bucket.into_values() {
        // Bands with fewer than two files cannot produce a group.
        if members.len() < 2 {
            continue;
        }

        // Inside a band, files sharing the same leading-4KB hash are grouped.
        let mut by_partial: HashMap<String, Vec<&FileInfo>> = HashMap::new();
        for file in members {
            let partial = calculate_partial_hash(&file.path).unwrap_or_default();
            by_partial.entry(partial).or_insert_with(Vec::new).push(file);
        }

        for candidates in by_partial.into_values() {
            if candidates.len() >= 2 {
                let group_files: Vec<FileInfo> = candidates.into_iter().cloned().collect();
                let total_size: u64 = group_files.iter().map(|f| f.size).sum();
                groups.push(DuplicateGroup {
                    files: group_files,
                    size: total_size,
                    similarity_type: format!("内容相似 ({}%)", threshold),
                });
            }
        }
    }

    groups
}

/// Compute (or fetch from the on-disk cache) the full SHA-256 hex digest of
/// the file at `file_path`, keyed by path + size + mtime so edits
/// invalidate the cached value. Returns `None` on any I/O failure.
///
/// Fix: removed a dead `get_cache_key` call whose result was bound to an
/// unused `_key` — it performed a redundant SHA-256 pass per file.
fn calculate_file_hash(file_path: &str) -> Option<String> {
    let metadata = fs::metadata(file_path).ok()?;

    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .unwrap_or(SystemTime::now())
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();

    // File-level cache lookup. NOTE: the cache is re-loaded from disk on
    // every call, which is I/O heavy for large scans — a known hot spot.
    let mut file_cache = FileCache::new("./cache");
    if let Some(cached_hash) = file_cache.get(file_path, file_size, modified_time) {
        println!("文件级缓存命中: {}", file_path);
        return Some(cached_hash);
    }

    // Cache miss: stream the file through SHA-256 in 32 KB chunks.
    let mut file = fs::File::open(file_path).ok()?;
    let mut hasher = Sha256::new();
    let mut buffer = [0; 32768];

    loop {
        let n = file.read(&mut buffer).ok()?;
        if n == 0 { break; }
        hasher.update(&buffer[..n]);
    }

    let hash = format!("{:x}", hasher.finalize());

    // Persist the result for subsequent scans.
    file_cache.set(file_path.to_string(), file_size, modified_time, hash.clone());
    println!("文件哈希已缓存: {}", file_path);

    Some(hash)
}

/// Hashes only the first 4 KiB of a file with SHA-256, caching the result in
/// the shared file-level cache under a "partial_"-prefixed key.
/// Returns `None` on any I/O error.
fn calculate_partial_hash(file_path: &str) -> Option<String> {
    // Size + mtime validate cached entries.
    let meta = fs::metadata(file_path).ok()?;
    let size = meta.len();
    let mtime = meta
        .modified()
        .unwrap_or(SystemTime::now())
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();

    // Partial hashes share the cache with full hashes; the key prefix
    // keeps the two kinds of entries apart.
    let mut cache = FileCache::new("./cache");
    let partial_key = format!("partial_{}", file_path);
    if let Some(hit) = cache.get(&partial_key, size, mtime) {
        println!("部分哈希缓存命中: {}", file_path);
        return Some(hit);
    }

    // Cache miss: read and hash just the leading 4 KiB.
    let mut f = fs::File::open(file_path).ok()?;
    let mut head = [0u8; 4096];
    let read = f.read(&mut head).ok()?;

    let mut hasher = Sha256::new();
    if read > 0 {
        hasher.update(&head[..read]);
    }
    let digest = format!("{:x}", hasher.finalize());

    // Store for future runs.
    cache.set(partial_key, size, mtime, digest.clone());
    println!("部分哈希已缓存: {}", file_path);

    Some(digest)
}

/// Computes a 64-bit average hash (aHash) for an image, returned as a
/// 64-character string of '0'/'1'. Returns `None` if the file cannot be
/// decoded as an image.
fn calculate_image_hash(file_path: &str) -> Option<String> {
    let img = image::open(file_path).ok()?;

    // `resize_exact` forces an 8x8 grid. The previous `resize` preserved
    // aspect ratio, so non-square images produced fewer than 64 pixels
    // (e.g. 8x4), which made the /64 average wrong and yielded
    // variable-length hashes that never matched across shapes.
    let gray = img.resize_exact(8, 8, FilterType::Lanczos3).to_luma8();

    // Average brightness over the 64 pixels.
    let avg = gray.pixels().map(|p| p[0] as u32).sum::<u32>() / 64;

    // One bit per pixel: brighter than average -> '1'.
    let mut hash = String::with_capacity(64);
    for pixel in gray.pixels() {
        hash.push(if pixel[0] as u32 > avg { '1' } else { '0' });
    }

    Some(hash)
}

/// Renders a byte count as a human-readable string with two decimal places,
/// e.g. `1536` -> `"1.50 KB"`. Caps out at TB.
fn format_file_size(bytes: u64) -> String {
    const UNITS: [&str; 5] = ["B", "KB", "MB", "GB", "TB"];

    let mut value = bytes as f64;
    let mut idx = 0usize;

    // Divide down by 1024 until the value fits the current unit,
    // stopping at the largest unit we know.
    while value >= 1024.0 && idx + 1 < UNITS.len() {
        value /= 1024.0;
        idx += 1;
    }

    format!("{:.2} {}", value, UNITS[idx])
}

/// Permanently deletes a file from disk. Errors (as user-facing strings)
/// when the path does not exist or removal fails.
#[tauri::command]
fn delete_file(file_path: String) -> Result<(), String> {
    // Refuse early when the path does not exist.
    if !Path::new(&file_path).exists() {
        return Err(format!("文件不存在: {}", file_path));
    }

    // Attempt the deletion, logging either outcome to stdout.
    if let Err(e) = fs::remove_file(&file_path) {
        let error_msg = format!("删除文件失败: {} - {}", file_path, e);
        println!("{}", error_msg);
        return Err(error_msg);
    }

    println!("文件删除成功: {}", file_path);
    Ok(())
}

/// Moves a file to the OS recycle bin / trash instead of deleting it outright.
#[tauri::command]
fn move_to_recycle_bin(file_path: String) -> Result<(), String> {
    use std::path::Path;

    // Only attempt to trash paths that actually exist.
    if !Path::new(&file_path).exists() {
        return Err(format!("文件不存在: {}", file_path));
    }

    // Delegate to the platform trash implementation.
    trash::delete(&file_path).map_err(|e| format!("移动到回收站失败: {}", e))
}

/// Reports whether the given path exists and is a regular file
/// (directories and dangling paths return `false`).
#[tauri::command]
fn verify_file_exists(file_path: String) -> Result<bool, String> {
    let p = Path::new(&file_path);
    Ok(p.is_file() && p.exists())
}

/// Best-effort cleanup: removes each listed directory if (and only if) it is
/// empty. Unreadable directories and failed removals are logged and skipped;
/// the command itself always returns `Ok`.
#[tauri::command]
fn clean_empty_folders(parent_paths: Vec<String>) -> Result<(), String> {
    let mut cleaned_count = 0;

    for dir in parent_paths {
        // Skip silently when the directory cannot be read (best-effort).
        if let Ok(mut entries) = fs::read_dir(&dir) {
            // A single yielded entry means the directory is not empty.
            if entries.next().is_some() {
                continue;
            }

            match fs::remove_dir(&dir) {
                Ok(_) => {
                    println!("清理空文件夹成功: {}", dir);
                    cleaned_count += 1;
                }
                Err(e) => {
                    println!("清理空文件夹失败: {} - {}", dir, e);
                }
            }
        }
    }

    println!("清理完成，共清理 {} 个空文件夹", cleaned_count);
    Ok(())
}

/// Reveals a file in the system file manager by opening its parent directory.
#[tauri::command]
fn open_file_location(file_path: String) -> Result<(), String> {
    use std::path::Path;

    // Validate the path before asking the OS to reveal it.
    let path = Path::new(&file_path);
    if !path.exists() {
        return Err(format!("文件不存在: {}", file_path));
    }

    // The containing directory is what the file manager should open.
    let parent_dir = path
        .parent()
        .ok_or_else(|| "无法获取文件所在目录".to_string())?;

    // Delegate to the platform's default handler for directories.
    if let Err(e) = open::that(parent_dir) {
        let error_msg = format!("打开目录失败: {} - {}", parent_dir.display(), e);
        println!("{}", error_msg);
        return Err(error_msg);
    }

    println!("成功打开文件所在目录: {}", parent_dir.display());
    Ok(())
}

// Thumbnail cache: LRU of up to 1000 entries, mapping file path ->
// JSON-serialized ThumbnailInfo. Shared across all thumbnail commands.
lazy_static::lazy_static! {
    static ref THUMBNAIL_CACHE: Mutex<LruCache<String, String>> = 
        Mutex::new(LruCache::new(NonZeroUsize::new(1000).unwrap()));
}

// Thumbnail generation configuration.
const THUMBNAIL_SIZE: u32 = 256; // maximum edge length in pixels
const THUMBNAIL_QUALITY: f32 = 80.0; // WebP encoder quality (0-100 scale)

// Thumbnail payload returned to the frontend.
#[derive(Debug, Serialize, Deserialize)]
struct ThumbnailInfo {
    data: String,      // base64-encoded WebP image data
    width: u32,
    height: u32,
    size: usize,       // encoded thumbnail size in bytes (pre-base64)
}

/// Returns a thumbnail for an image or video file, serving from the in-memory
/// LRU cache when possible. Images are decoded and resized; videos get a
/// generated placeholder. Errors for missing files or unsupported extensions.
#[tauri::command]
async fn generate_thumbnail(file_path: String) -> Result<ThumbnailInfo, String> {
    // Fast path: cached JSON-serialized ThumbnailInfo.
    {
        let mut cache = THUMBNAIL_CACHE.lock().unwrap();
        if let Some(cached_data) = cache.get(&file_path) {
            // Entries that fail to parse fall through to regeneration.
            if let Ok(info) = serde_json::from_str::<ThumbnailInfo>(cached_data) {
                return Ok(info);
            }
        }
    }

    let path = Path::new(&file_path);
    if !path.exists() {
        return Err("文件不存在".to_string());
    }

    // Dispatch on the lowercased file extension.
    let extension = path.extension()
        .and_then(|ext| ext.to_str())
        .map(|s| s.to_lowercase())
        .unwrap_or_default();

    let thumbnail_info = match extension.as_str() {
        "jpg" | "jpeg" | "png" | "gif" | "bmp" | "webp" | "tiff" => {
            generate_image_thumbnail(&file_path).await?
        }
        "mp4" | "avi" | "mov" | "wmv" | "flv" | "webm" | "mkv" => {
            generate_video_thumbnail(&file_path).await?
        }
        _ => return Err("不支持的文件类型".to_string())
    };

    // Cache the result only when serialization succeeds. Previously
    // `unwrap_or_default()` stored an empty string on failure, wasting a
    // cache slot with an entry that could never deserialize.
    if let Ok(cache_data) = serde_json::to_string(&thumbnail_info) {
        let mut cache = THUMBNAIL_CACHE.lock().unwrap();
        cache.put(file_path, cache_data);
    }

    Ok(thumbnail_info)
}

/// Builds a WebP thumbnail for an image file: decode, resize within the
/// configured bounds (aspect ratio preserved), encode, then base64.
async fn generate_image_thumbnail(file_path: &str) -> Result<ThumbnailInfo, String> {
    let img = image::open(file_path)
        .map_err(|e| format!("无法打开图片: {}", e))?;

    // Target dimensions keep the original aspect ratio.
    let (thumb_width, thumb_height) = calculate_thumbnail_size(img.width(), img.height());

    // Resize, encode as WebP, and nudge the payload toward the 5-10 KB window.
    let thumbnail = img.resize_exact(thumb_width, thumb_height, FilterType::Lanczos3);
    let webp_data = encode_webp(&thumbnail)?;
    let final_data = optimize_webp_size(webp_data, 5120, 10240)?; // 5-10 KB target

    let size = final_data.len();
    let base64_data = general_purpose::STANDARD.encode(&final_data);

    Ok(ThumbnailInfo {
        data: base64_data,
        width: thumb_width,
        height: thumb_height,
        size,
    })
}

/// Synthesizes a placeholder thumbnail for video files (no video decoder is
/// bundled): a radial blue gradient with a white play-button glyph, encoded
/// as WebP like any image thumbnail. The file path is intentionally unused.
async fn generate_video_thumbnail(_file_path: &str) -> Result<ThumbnailInfo, String> {
    // (The previous version also built an SVG placeholder string here and
    // immediately discarded it — that dead call has been removed.)

    // Draw the placeholder procedurally, pixel by pixel.
    let img = image::ImageBuffer::from_fn(THUMBNAIL_SIZE, THUMBNAIL_SIZE, |x, y| {
        // Radial gradient intensity: brightest at the center, fading outward.
        let center_x = THUMBNAIL_SIZE / 2;
        let center_y = THUMBNAIL_SIZE / 2;
        let distance = ((x as f32 - center_x as f32).powi(2) + (y as f32 - center_y as f32).powi(2)).sqrt();
        let max_distance = center_x as f32 * 1.4;
        let intensity = (1.0 - (distance / max_distance).min(1.0)) * 255.0;

        if is_in_play_button(x, y, THUMBNAIL_SIZE) {
            image::Rgb([255, 255, 255]) // white play button
        } else {
            let blue = (100.0 + intensity * 0.6) as u8;
            image::Rgb([30, 30, blue]) // blue gradient background
        }
    });

    let dynamic_img = DynamicImage::ImageRgb8(img);

    // Encode and size-optimize exactly like image thumbnails.
    let webp_data = encode_webp(&dynamic_img)?;
    let final_data = optimize_webp_size(webp_data, 5120, 10240)?;
    let base64_data = general_purpose::STANDARD.encode(&final_data);

    Ok(ThumbnailInfo {
        data: base64_data,
        width: THUMBNAIL_SIZE,
        height: THUMBNAIL_SIZE,
        size: final_data.len(),
    })
}

/// Returns an inline SVG placeholder for video files: a rounded blue square
/// with a centered play icon, sized 256x256.
fn create_video_placeholder_svg() -> String {
    // Plain conversion: the previous `format!` had no interpolation
    // (clippy::useless_format).
    "<svg width='256' height='256' xmlns='http://www.w3.org/2000/svg'><rect width='256' height='256' fill='#3b82f6' rx='12'/><circle cx='128' cy='128' r='40' fill='rgba(255,255,255,0.9)'/><polygon points='118,108 118,148 148,128' fill='#1e3a8a'/></svg>".to_string()
}

/// Tests whether pixel (x, y) falls inside the triangular "play" glyph at the
/// center of a size x size placeholder thumbnail. The glyph is bounded by a
/// circle of radius size/6 around the center.
fn is_in_play_button(x: u32, y: u32, size: u32) -> bool {
    let half = (size / 2) as i32;
    let button_radius = (size / 6) as i32;

    // Offset of the pixel from the image center.
    let dx = x as i32 - half;
    let dy = y as i32 - half;

    // Outside the circular button area: definitely not on the glyph.
    let distance = ((dx * dx + dy * dy) as f32).sqrt();
    if distance > button_radius as f32 {
        return false;
    }

    // Inside the circle, test the right-pointing triangle: x runs from the
    // (shifted) left edge to triangle_size, and the allowed |y| narrows
    // linearly toward the tip.
    let triangle_size = button_radius / 2;
    let rel_x = dx + triangle_size / 2;
    let rel_y = dy;

    rel_x >= 0 && rel_x <= triangle_size && rel_y.abs() <= (triangle_size - rel_x) / 2
}

/// Computes thumbnail dimensions that fit within THUMBNAIL_SIZE while
/// preserving aspect ratio. Images already within bounds keep their size.
fn calculate_thumbnail_size(orig_width: u32, orig_height: u32) -> (u32, u32) {
    let max_size = THUMBNAIL_SIZE;

    // Small enough already: no scaling needed.
    if orig_width <= max_size && orig_height <= max_size {
        return (orig_width, orig_height);
    }

    let ratio = (orig_width as f32) / (orig_height as f32);

    // `.max(1)` guards extreme aspect ratios (e.g. 10000x1) where the float
    // division would truncate the short edge to 0 and produce an invalid
    // zero-pixel thumbnail for the resize step.
    if orig_width > orig_height {
        (max_size, ((max_size as f32 / ratio) as u32).max(1))
    } else {
        (((max_size as f32 * ratio) as u32).max(1), max_size)
    }
}

/// Encodes an image as lossy WebP at THUMBNAIL_QUALITY, returning the raw
/// encoded bytes. (The `Result` wrapper matches the callers' error style;
/// encoding itself does not fail here.)
fn encode_webp(img: &DynamicImage) -> Result<Vec<u8>, String> {
    // The webp encoder expects packed RGB8 input.
    let rgb = img.to_rgb8();
    let (w, h) = rgb.dimensions();

    let encoded = webp::Encoder::from_rgb(&rgb, w, h).encode(THUMBNAIL_QUALITY);
    Ok(encoded.to_vec())
}

/// Intended to keep WebP payloads between `min_size` and `max_size` bytes.
/// Currently a pass-through: quality-reduction re-encoding is not yet
/// implemented, so the data is returned unchanged in every case.
fn optimize_webp_size(data: Vec<u8>, min_size: usize, max_size: usize) -> Result<Vec<u8>, String> {
    let len = data.len();

    // Already within the target window: nothing to do.
    if (min_size..=max_size).contains(&len) {
        return Ok(data);
    }

    // TODO: when the payload exceeds max_size, re-encode at reduced quality.
    // Undersized payloads are acceptable as-is.
    Ok(data)
}

/// Placeholder command: real video preview extraction requires FFmpeg, which
/// is not bundled, so this logs the request and always returns an error.
#[tauri::command]
async fn generate_video_preview(file_path: String) -> Result<String, String> {
    println!("视频预览请求: {}", file_path);

    // Inform the frontend that only the placeholder icon is available.
    Err("视频预览功能需要FFmpeg支持，当前版本使用占位图标".to_string())
}

// One similarity cluster as presented in the frontend's grouped view.
#[derive(Debug, Serialize, Deserialize)]
struct ClusterGroup {
    id: String,                    // stable identifier, "cluster_{index}"
    files: Vec<FileInfo>,          // all member files of this cluster
    similarity_score: f32,         // 0..=1; higher means more alike
    center_file: Option<FileInfo>, // representative file (first member)
    is_expanded: bool,             // UI expansion state; starts collapsed
}

/// Converts duplicate groups into UI cluster groups, scoring each group's
/// internal similarity and returning them sorted most-similar first.
#[tauri::command]
async fn cluster_similar_files(groups: Vec<DuplicateGroup>) -> Result<Vec<ClusterGroup>, String> {
    let mut clusters = Vec::new();

    for (index, group) in groups.iter().enumerate() {
        // Simplified similarity based on the spread of file sizes.
        let similarity_score = calculate_group_similarity(group);

        // The first file stands in as the cluster's representative.
        let center_file = group.files.first().cloned();

        clusters.push(ClusterGroup {
            id: format!("cluster_{}", index),
            files: group.files.clone(),
            similarity_score,
            center_file,
            is_expanded: false, // collapsed by default
        });
    }

    // Highest similarity first. `total_cmp` is NaN-safe: the previous
    // `partial_cmp(..).unwrap()` would panic if any score were NaN.
    clusters.sort_by(|a, b| b.similarity_score.total_cmp(&a.similarity_score));

    Ok(clusters)
}

/// Scores how similar a group's files are, in [0, 1], using the coefficient
/// of variation of file sizes (smaller spread -> higher score). A fuller
/// implementation would also weigh perceptual hashes and other signals.
fn calculate_group_similarity(group: &DuplicateGroup) -> f32 {
    // Fewer than two files: nothing to compare.
    if group.files.len() <= 1 {
        return 0.0;
    }

    let sizes: Vec<u64> = group.files.iter().map(|f| f.size).collect();
    let avg_size = sizes.iter().sum::<u64>() as f64 / sizes.len() as f64;

    // All files empty: sizes are identical, so treat as maximally similar.
    // (Previously this fell through to a 0/0 division, producing NaN that
    // could then panic in the caller's sort comparator.)
    if avg_size == 0.0 {
        return 1.0;
    }

    let variance = sizes.iter()
        .map(|&size| {
            let diff = size as f64 - avg_size;
            diff * diff
        })
        .sum::<f64>() / sizes.len() as f64;

    let coefficient_of_variation = variance.sqrt() / avg_size;

    // Map spread to similarity: zero spread -> 1.0, CV >= 1 -> 0.0.
    (1.0 - coefficient_of_variation.min(1.0)) as f32
}

fn main() {
    // Register every IPC command the frontend may invoke, then start Tauri.
    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![
            test_command, 
            select_directory, 
            scan_directory,
            scan_directory_with_progress,
            cancel_scan,
            delete_file,
            move_to_recycle_bin,
            clean_empty_folders,
            verify_file_exists,
            open_file_location,
            generate_thumbnail,
            generate_video_preview,
            cluster_similar_files
        ])
        .run(tauri::generate_context!())
        // Startup failure is unrecoverable for a desktop app; abort with context.
        .expect("error while running tauri application");
}
