use std::collections::{HashMap, HashSet};
use std::fs::{copy, create_dir_all};
use std::io::Read;
use std::sync::{mpsc, Arc, RwLock};
use std::time::Instant;
use std::{
    fs::File,
    path::{Path, PathBuf},
};

use log::{debug, info, warn};
use anyhow::{Context, Result};
use hex::ToHex;
use regex::Regex;
use tiny_keccak::{Sha3, Hasher};

use threadpool::ThreadPool;
use rusqlite::{params, Connection};

use clap::Parser;

// Top-level action selected via --action.
// (Plain `//` comments on purpose: `///` doc comments would be picked up by
// clap as help text and change the CLI output.)
#[derive(clap::ValueEnum, Clone, Debug)]
enum Action {
    // Deduplicate the inputs and copy unique files into the output folder.
    #[value(name = "COPY")]
    Copy,
    // Export previously recorded duplicate groups as JSON on stdout.
    #[value(name = "REPORT")]
    Report,
}

// Hashing strategy selected via --mode. Copying is always sequential; this
// only controls how file hashes are computed.
#[derive(clap::ValueEnum, Clone, Debug)]
enum Mode {
    // Hash files one at a time on the main thread.
    #[value(name = "SINGLE")]
    Single,
    // Hash files concurrently on a thread pool sized to the CPU count.
    #[value(name = "MULTI")]
    Multi,
}

// Input-folder CLI value supporting the `path$alias` format.
#[derive(Clone, Debug)]
struct InputFolder {
    path: PathBuf,
    alias: Option<String>,
}

impl std::str::FromStr for InputFolder {
    type Err = String;

    /// Parses `path` or `path$alias`. Only the first `$` is significant, so
    /// Windows drive letters (`C:\...`) are unaffected. A blank or
    /// whitespace-only alias is treated as absent.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (path_part, alias_part) = match s.split_once('$') {
            Some((p, a)) => (p, Some(a)),
            None => (s, None),
        };
        if path_part.is_empty() {
            return Err("路径不能为空".to_string());
        }
        let alias = alias_part
            .map(str::trim)
            .filter(|a| !a.is_empty())
            .map(str::to_string);
        Ok(InputFolder {
            path: PathBuf::from(path_part),
            alias,
        })
    }
}

// Command-line options for Uniqua.
// (Plain `//` comments on purpose: `///` would be read by clap as help text.)
#[derive(Parser, Debug)]
#[command(version, about, long_about = "Uniqua - Duplicate File Management Tool")]
pub struct Opt {
    // The alias delimiter is '$' (see InputFolder::from_str); the previous
    // help text wrongly advertised ':' as the separator.
    #[arg(short = 'i', long = "in", num_args(1..), help = "Input folder paths (supports path$alias)")]
    in_folders: Vec<InputFolder>,

    #[arg(short = 'o', long = "out", help = "Output folder path")]
    out_folder: PathBuf,

    #[arg(short = 'v', long = "verbose", help = "Enable verbose logging")]
    verbose: bool,

    // Controls hashing concurrency only; copying is always sequential.
    #[arg(
        long = "mode",
        value_enum,
        default_value_t = Mode::Single,
        help = "Processing mode: SINGLE (sequential hashing) or MULTI (parallel hashing)"
    )]
    mode: Mode,

    // Under --dry-run, hashing and duplicate detection still run and the DB
    // is still written; only the copy pass is skipped.
    #[arg(long = "dry-run", help = "Dry-run: simulate without actual copying")]
    dry_run: bool,

    #[arg(long = "database", help = "SQLite database file path", default_value = "db.sqlite3")]
    database: PathBuf,

    #[arg(long = "action", help = "Action type: COPY(copy files) or REPORT(export report)", default_value = "COPY")]
    action: Action,

    // Regex filters apply to the file name only, not the full path.
    #[arg(short = 'f', long = "include-filter", help = "Include filename regex filter")]
    include_filter: Option<String>,

    #[arg(short = 'x', long = "exclude-filter", help = "Exclude filename regex filter")]
    exclude_filter: Option<String>,
}

// Conditional logging helper: evaluates `$log` only when `$verbose` is true,
// so verbose-only log statements cost nothing otherwise.
// `$verbose` must be a `bool` identifier in scope at the call site.
macro_rules! VERBOSE {
    ($verbose:ident, $log: expr) => {
        if $verbose {
            $log
        }
    };
}

// Lock-guard helper: `$lock` is the `Result` returned by `RwLock::read()` /
// `write()`; `$action` is a closure taking the guard and returning a Result.
// A poisoned lock becomes an `anyhow` error instead of a panic; `into_inner`
// recovers the guard so the protected value can appear in the message.
macro_rules! with_lock {
    ($lock:expr, $action:expr) => {
        match $lock {
            Ok(guard) => $action(guard),
            Err(err) => Err(anyhow::anyhow!("Lock operation failed: {:?}", err.into_inner())),
        }
    };
}

// Wraps a fallible closure for submission to the thread pool. Errors cannot
// propagate across the pool boundary, so they are logged (not returned);
// callers must not rely on pooled tasks reporting failure.
macro_rules! thread_task {
    ($f:expr) => {
        move || {
            if let Err(e) = $f() {
                log::error!("Thread task failed: {:?}", e);
            }
        }
    };
}

// Export duplicate-file groups from the database as pretty-printed JSON on
// stdout. Groups are emitted in ascending database-id order, matching the SQL
// ORDER BY; the previous HashMap-based grouping serialized groups in a random
// order on every run.
fn export_duplicates_to_json(db_path: &PathBuf) -> Result<()> {
    use serde::Serialize;
    use serde_json::json;
    use std::collections::BTreeMap;

    // Serialized shapes of the JSON report.
    #[derive(Serialize)]
    struct DuplicateFile {
        path: String,
    }

    #[derive(Serialize)]
    struct DuplicateGroup {
        hash_code: String,
        files: Vec<DuplicateFile>,
        created_at: String,
    }

    let conn = Connection::open(db_path)
        .with_context(|| format!("Failed to open database: {:?}", db_path))?;

    // One row per (group, file); rows arrive grouped by group id.
    let mut stmt = conn.prepare(r#"
        SELECT 
            dg.id, 
            dg.hash_code, 
            dg.created_at, 
            df.file_path 
        FROM 
            duplicate_groups dg
        JOIN 
            duplicate_files df ON dg.id = df.group_id
        ORDER BY 
            dg.id, df.id
    "#)
        .with_context(|| "Failed to prepare query")?;

    let rows = stmt.query_map([], |row| {
        Ok((
            row.get::<_, i64>(0)?,
            row.get::<_, String>(1)?,
            row.get::<_, String>(2)?,
            row.get::<_, String>(3)?,
        ))
    })
    .with_context(|| "Failed to execute query")?;

    // BTreeMap keyed by group id keeps groups sorted for deterministic output.
    let mut groups: BTreeMap<i64, DuplicateGroup> = BTreeMap::new();
    for row in rows {
        let (group_id, hash_code, created_at, file_path) = row.with_context(|| "Failed to parse query result")?;

        let group = groups.entry(group_id).or_insert_with(|| DuplicateGroup {
            hash_code,
            files: Vec::new(),
            created_at,
        });

        group.files.push(DuplicateFile { path: file_path });
    }

    // Values of a BTreeMap iterate in key (group id) order.
    let groups_array: Vec<_> = groups.values().collect();
    let json_result = json!({"duplicate_groups": groups_array});

    println!("{}", serde_json::to_string_pretty(&json_result)?);

    Ok(())
}

fn main() -> Result<()> {
    // Load .env (if present) and initialize env_logger (RUST_LOG controls level).
    dotenv::dotenv().ok();
    env_logger::init();

    let start = Instant::now();

    // Parse command-line arguments.
    let opt: Arc<Opt> = Arc::new(Opt::parse());
    let verbose = opt.verbose;

    VERBOSE!(verbose, debug!("{:?}", opt));
    VERBOSE!(verbose, info!(""));

    // Dispatch on the requested action.
    match opt.action {
        Action::Copy => {
            VERBOSE!(verbose, info!("Starting copy"));
            // Create the output directory if it does not exist yet.
            if !opt.out_folder.exists() {
                create_dir_all(&opt.out_folder)
                    .with_context(|| format!("Failed to create output directory: {:?}", opt.out_folder))?;
            }

            // Compile optional include/exclude filename filters.
            let (include_regex, exclude_regex) = create_filters(&opt.include_filter, &opt.exclude_filter)?;

            // Hash everything already in the output folder so re-runs skip
            // content that is already present there.
            let mut out_snapshot = build_out_snapshot(verbose, &opt.out_folder)
                .with_context(|| "Failed to build output snapshot")?;

            // Thread pool is used for concurrent hashing only; copying stays sequential.
            let thread_pool = match opt.mode {
                Mode::Single => None,
                Mode::Multi => {
                    let cpu_count = num_cpus::get();
                    VERBOSE!(verbose, info!("Parallel hashing, threads: {}", cpu_count));
                    Some(ThreadPool::new(cpu_count))
                }
            };

            VERBOSE!(verbose, info!("thread_pool : {:?}", thread_pool));

            // Duplicate groups found so far (hash -> list of relative paths).
            let mut duplicate_files_map: HashMap<String, Vec<String>> = HashMap::new();
            // Unique files queued for a single sequential copy pass at the end
            // (hash -> (absolute source path, output-relative path)).
            let files_to_copy_map = Arc::new(RwLock::new(HashMap::<String, (String, PathBuf)>::new()));

            // Walk the input folders in command-line order.
            for in_folder in &opt.in_folders {
                let files = collect_file_paths_for_input(verbose, in_folder, &include_regex, &exclude_regex)
                    .with_context(|| format!("Failed to collect input directory files: {:?}", in_folder.path))?;

                // Hash this folder's files into per-folder unique/duplicate maps.
                let (unique_map, dup_map) = process_files(verbose, &thread_pool, &files)
                    .with_context(|| "Failed to compute file hashes")?;

                // Merge this folder's duplicates into the global map, skipping repeats.
                with_lock!(dup_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, Vec<String>>>| {
                    for (hash, paths) in guard.iter() {
                        let entry = duplicate_files_map.entry(hash.clone()).or_insert_with(Vec::new);
                        for p in paths {
                            if !entry.contains(p) {
                                entry.push(p.clone());
                            }
                        }
                    }
                    Ok(())
                })?;

                // For each unique file: compare against the output snapshot and
                // either record it as a duplicate or queue it for copying.
                with_lock!(unique_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, (String, PathBuf)>>| {
                    for (hash_code, (full_path_name, relative_path)) in guard.iter() {
                        let rel_str = relative_path.to_str().unwrap_or("").to_string();

                        if let Some(out_paths) = out_snapshot.get(hash_code) {
                            // Already present in the output: record a duplicate
                            // group containing the output path(s) and this path.
                            let entry = duplicate_files_map.entry(hash_code.clone()).or_insert_with(Vec::new);
                            for p in out_paths {
                                if !entry.contains(p) {
                                    entry.push(p.clone());
                                }
                            }
                            if !entry.contains(&rel_str) {
                                entry.push(rel_str.clone());
                            }
                        } else {
                            // Not present yet: queue for copying. The snapshot is
                            // updated even under --dry-run so later inputs see it.
                            with_lock!(files_to_copy_map.write(), |mut w: std::sync::RwLockWriteGuard<HashMap<String, (String, PathBuf)>>| {
                                w.insert(hash_code.clone(), (full_path_name.clone(), relative_path.clone()));
                                Ok(())
                            })?;
                            let out_rel_str = rel_str;
                            out_snapshot.entry(hash_code.clone()).or_insert_with(Vec::new).push(out_rel_str);
                        }
                    }
                    Ok(())
                })?;
            }

            // Single sequential copy pass (skipped entirely under --dry-run).
            if !opt.dry_run {
                copy_files_to_output(verbose, &None, &files_to_copy_map, &opt.out_folder)
                    .with_context(|| "Failed to copy unique files to output directory")?;
            }

            // Summary statistics.
            let copied_count = with_lock!(files_to_copy_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, (String, PathBuf)>>| {
                Ok(guard.len())
            })?;
            VERBOSE!(verbose, info!("Sequential copy finished, copied {} files", copied_count));
            VERBOSE!(verbose, info!("Found {} duplicate groups", duplicate_files_map.len()));

            // Persist the duplicate groups to SQLite for later REPORT runs.
            init_database(&opt.database).with_context(|| "Failed to initialize database")?;
            let duplicates_arc = Arc::new(RwLock::new(duplicate_files_map));
            save_duplicates_to_database(&opt.database, &duplicates_arc)
                .with_context(|| "Failed to save duplicate file info")?;
        },
        Action::Report => {
            // REPORT: dump previously recorded duplicate groups as JSON.
            export_duplicates_to_json(&opt.database)
                .with_context(|| "Failed to export duplicate file info")?;
        }
    }

    let end = Instant::now();
    let duration = end.duration_since(start).as_millis();

    info!("Task completed, total time {} ms", duration);

    Ok(())
}

// Open (or create) the SQLite database and make sure the schema exists.
// Every statement uses IF NOT EXISTS, so this is safe to call repeatedly.
fn init_database(db_path: &PathBuf) -> Result<Connection> {
    let conn = Connection::open(db_path)
        .with_context(|| format!("Failed to open database: {:?}", db_path))?;

    // (DDL statement, error context) pairs, executed in order.
    let schema: [(&str, &str); 4] = [
        (
            "CREATE TABLE IF NOT EXISTS duplicate_groups (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            hash_code TEXT NOT NULL UNIQUE,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )",
            "Failed to create duplicate_groups table",
        ),
        (
            "CREATE TABLE IF NOT EXISTS duplicate_files (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            group_id INTEGER NOT NULL,
            file_path TEXT NOT NULL,
            FOREIGN KEY (group_id) REFERENCES duplicate_groups(id)
        )",
            "Failed to create duplicate_files table",
        ),
        // Indexes to speed up lookups by hash and by group.
        (
            "CREATE INDEX IF NOT EXISTS idx_duplicate_groups_hash_code ON duplicate_groups(hash_code)",
            "Failed to create hash_code index",
        ),
        (
            "CREATE INDEX IF NOT EXISTS idx_duplicate_files_group_id ON duplicate_files(group_id)",
            "Failed to create group_id index",
        ),
    ];

    for (sql, ctx_msg) in schema {
        conn.execute(sql, []).with_context(|| ctx_msg.to_string())?;
    }

    Ok(conn)
}

// Persist the duplicate-file groups into SQLite inside a single transaction.
// Statements are prepared once and reused for every group/file; the previous
// version re-prepared both statements on every loop iteration.
fn save_duplicates_to_database(
    db_path: &PathBuf,
    duplicate_files_map: &Arc<RwLock<HashMap<String, Vec<String>>>>
) -> Result<()> {
    let mut conn = init_database(db_path)?;

    // One transaction: either all groups are recorded or none.
    let tx = conn.transaction()
        .with_context(|| "Failed to begin DB transaction")?;

    // Clone the map out so the lock is not held during database work.
    let duplicates = with_lock!(duplicate_files_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, Vec<String>>>| {
        Ok(guard.clone())
    })?;

    {
        // Prepared statements borrow `tx`, so scope them to end before commit.
        let mut insert_group = tx.prepare(
            "INSERT OR IGNORE INTO duplicate_groups (hash_code) VALUES (?)",
        ).with_context(|| "Failed to prepare insert group statement")?;
        let mut select_group_id = tx.prepare(
            "SELECT id FROM duplicate_groups WHERE hash_code = ?",
        ).with_context(|| "Failed to prepare select group statement")?;
        let mut insert_file = tx.prepare(
            "INSERT OR IGNORE INTO duplicate_files (group_id, file_path) VALUES (?, ?)",
        ).with_context(|| "Failed to prepare insert file statement")?;

        for (hash_code, file_paths) in duplicates.iter() {
            // Insert the group (no-op if the hash already exists).
            insert_group.execute([hash_code])
                .with_context(|| format!("Failed to insert group: {}", hash_code))?;

            // Resolve the group id (valid for both new and pre-existing groups).
            let group_id: i64 = select_group_id
                .query_row(params![hash_code], |row| row.get(0))
                .with_context(|| format!("Failed to get group ID: {}", hash_code))?;

            // NOTE(review): duplicate_files has no UNIQUE constraint on
            // (group_id, file_path), so OR IGNORE never suppresses repeats here;
            // re-running against the same DB inserts duplicate rows — confirm intent.
            for file_path in file_paths {
                insert_file.execute(params![group_id, file_path])
                    .with_context(|| format!("Failed to insert file: {} - {}", file_path, hash_code))?;
            }
        }
    }

    tx.commit()
        .with_context(|| "Failed to commit DB transaction")?;

    Ok(())
}

// Compile the optional include/exclude CLI patterns into regexes.
// Returns an error (with the offending pattern) when either fails to compile.
fn create_filters(
    include_filter: &Option<String>,
    exclude_filter: &Option<String>,
) -> Result<(Option<Regex>, Option<Regex>)> {
    let include_regex = include_filter
        .as_ref()
        .map(|pattern| {
            Regex::new(pattern)
                .with_context(|| format!("Invalid include filter regex: {}", pattern))
        })
        .transpose()?;

    let exclude_regex = exclude_filter
        .as_ref()
        .map(|pattern| {
            Regex::new(pattern)
                .with_context(|| format!("Invalid exclude filter regex: {}", pattern))
        })
        .transpose()?;

    Ok((include_regex, exclude_regex))
}

// Returns true when the file's name passes both filters. Exclusion wins over
// inclusion; files whose name is not valid UTF-8 are always skipped.
fn should_include_file(file_path: &Path, include_regex: &Option<Regex>, exclude_regex: &Option<Regex>) -> bool {
    let name = match file_path.file_name().and_then(|n| n.to_str()) {
        None => return false,
        Some(n) => n,
    };

    // Exclude filter takes precedence.
    if exclude_regex.as_ref().map_or(false, |re| re.is_match(name)) {
        return false;
    }

    // No include filter means everything (not excluded) is accepted.
    include_regex.as_ref().map_or(true, |re| re.is_match(name))
}

// Collect (absolute path, output-relative path) pairs from one input folder.
// The relative path is prefixed with the alias (if given) or the folder's own
// name, so files from different inputs land under distinct top-level dirs.
//
// `walk_folder` drives the closure synchronously on this thread, so a plain
// BTreeSet suffices (the previous Arc<RwLock<HashSet>> bought nothing and
// silently dropped entries on lock failure); BTreeSet also dedupes and
// yields the entries already sorted by absolute path.
fn collect_file_paths_for_input(
    verbose: bool,
    in_folder: &InputFolder,
    include_regex: &Option<Regex>,
    exclude_regex: &Option<Regex>,
) -> Result<Vec<(String, PathBuf)>> {
    use std::collections::BTreeSet;

    let in_path = &in_folder.path;
    let alias = &in_folder.alias;
    VERBOSE!(verbose, debug!("Processing input folder: {:?} (alias: {:?})", in_path, alias));

    // A missing input folder is a warning, not an error.
    if !in_path.exists() {
        warn!("Input folder does not exist: {:?}", in_path);
        return Ok(Vec::new());
    }

    let mut path_set: BTreeSet<(String, PathBuf)> = BTreeSet::new();

    walk_folder(in_path, &mut |p: &Path| {
        if !should_include_file(p, include_regex, exclude_regex) {
            return;
        }

        if let Ok(rel_under_in) = p.strip_prefix(in_path) {
            // Choose the top-level prefix: alias > folder name > none.
            let relative_path = match alias {
                Some(a) => PathBuf::from(a).join(rel_under_in),
                None => match in_path.file_name().and_then(|s| s.to_str()) {
                    Some(name) => PathBuf::from(name).join(rel_under_in),
                    None => rel_under_in.to_path_buf(),
                },
            };

            // Files whose canonical path is unobtainable or non-UTF-8 are skipped,
            // matching the original behavior.
            if let Ok(absolute_path) = p.canonicalize() {
                if let Some(full_path_str) = absolute_path.to_str() {
                    path_set.insert((full_path_str.to_string(), relative_path));
                }
            }
        }
    })
    .with_context(|| format!("Failed to walk folder: {:?}", in_path))?;

    // BTreeSet iteration is already ordered by (absolute path, relative path).
    Ok(path_set.into_iter().collect())
}

// Hash every file already present in the output folder, producing a map of
// hash -> relative paths (relative to `out_folder`). Files that fail to hash
// are silently skipped so one unreadable file cannot abort the snapshot.
fn build_out_snapshot(verbose: bool, out_folder: &Path) -> Result<HashMap<String, Vec<String>>> {
    let mut snapshot: HashMap<String, Vec<String>> = HashMap::new();
    if !out_folder.exists() {
        // Nothing copied yet: empty snapshot.
        return Ok(snapshot);
    }
    let root = out_folder.to_path_buf();

    walk_folder(out_folder, &mut |file: &Path| {
        let rel = match file.strip_prefix(&root) {
            Ok(r) => r,
            Err(_) => return,
        };
        let abs = match file.to_str() {
            Some(s) => s,
            None => return,
        };
        if let Ok(digest) = hash_file(verbose, abs) {
            let rel_str = rel.to_str().unwrap_or("").to_string();
            let paths = snapshot.entry(digest).or_insert_with(Vec::new);
            if !paths.contains(&rel_str) {
                paths.push(rel_str);
            }
        }
    })?;

    Ok(snapshot)
}

// Hash the given files (in parallel when a pool is supplied) and split them
// into two shared maps: first occurrence per hash, and duplicates per hash.
fn process_files(
    verbose: bool,
    thread_pool: &Option<ThreadPool>,
    file_paths: &[(String, PathBuf)],
) -> Result<(
    Arc<RwLock<HashMap<String, (String, PathBuf)>>>,
    Arc<RwLock<HashMap<String, Vec<String>>>>
)> {
    let uniques = Arc::new(RwLock::new(HashMap::new()));
    let duplicates = Arc::new(RwLock::new(HashMap::new()));

    if let Some(pool) = thread_pool {
        process_files_parallel(
            verbose,
            pool,
            file_paths,
            Arc::clone(&uniques),
            Arc::clone(&duplicates),
        )?;
    } else {
        process_files_sequential(
            verbose,
            file_paths,
            Arc::clone(&uniques),
            Arc::clone(&duplicates),
        )?;
    }

    Ok((uniques, duplicates))
}

// Hash files on the thread pool, streaming results back over a channel.
// Results are folded into the maps on this thread, so map updates stay
// single-threaded even in MULTI mode.
fn process_files_parallel(
    verbose: bool,
    pool: &ThreadPool,
    file_paths: &[(String, PathBuf)],
    unique_files_map: Arc<RwLock<HashMap<String, (String, PathBuf)>>>,
    duplicate_files_map: Arc<RwLock<HashMap<String, Vec<String>>>>,
) -> Result<()> {
    let (tx, rx) = mpsc::channel();

    for (full_path_name, relative_path) in file_paths {
        let full_path_name = full_path_name.clone();
        let relative_path = relative_path.clone();
        let tx = tx.clone();

        pool.execute(thread_task!(move || -> Result<()> {
            match hash_file(verbose, &full_path_name) {
                Ok(hash_code) => {
                    tx.send(Some((hash_code, full_path_name, relative_path)))?;
                },
                Err(e) => {
                    // A failed hash is logged and reported as None so the
                    // receiver still sees one message per submitted file.
                    log::error!("Failed to compute file hash {}: {:?}", full_path_name, e);
                    tx.send(None)?;
                }
            }
            Ok(())
        }));
    }

    // Drop the original sender so the channel closes once every worker has
    // finished. Draining with `for` then terminates even if a worker dies
    // before sending (the previous version counted recv() calls and could
    // block forever in that case).
    drop(tx);

    for msg in rx {
        if let Some((hash_code, full_path_name, relative_path)) = msg {
            update_file_maps(
                unique_files_map.clone(),
                duplicate_files_map.clone(),
                hash_code,
                full_path_name,
                relative_path,
            )?;
        }
    }

    Ok(())
}

// Hash each file on the current thread and record it as unique or duplicate.
fn process_files_sequential(
    verbose: bool,
    file_paths: &[(String, PathBuf)],
    unique_files_map: Arc<RwLock<HashMap<String, (String, PathBuf)>>>,
    duplicate_files_map: Arc<RwLock<HashMap<String, Vec<String>>>>,
) -> Result<()> {
    for (full_path_name, relative_path) in file_paths {
        // Fixed log text: this loop hashes files (the old message said "Copy file").
        VERBOSE!(verbose, debug!("Hashing file: {}", full_path_name));

        match hash_file(verbose, full_path_name) {
            Ok(hash_code) => {
                update_file_maps(
                    unique_files_map.clone(),
                    duplicate_files_map.clone(),
                    hash_code,
                    full_path_name.clone(),
                    relative_path.clone(),
                )?;
            },
            Err(e) => {
                // One unreadable file must not abort the whole run.
                log::error!("Failed to compute file hash {}: {:?}", full_path_name, e);
            }
        }
    }
    Ok(())
}

// 更新文件映射，处理唯一文件和重复文件
fn update_file_maps(
    unique_files_map: Arc<RwLock<HashMap<String, (String, PathBuf)>>>,
    duplicate_files_map: Arc<RwLock<HashMap<String, Vec<String>>>>,
    hash_code: String,
    full_path_name: String,
    relative_path: PathBuf,
) -> Result<()> {
    // 将相对路径转换为字符串
    let relative_path_str = relative_path.to_str()
        .ok_or_else(|| anyhow::anyhow!("Failed to convert path to string: {:?}", relative_path))?
        .to_string();
    
    // 先检查该哈希值是否已存在于唯一文件映射中
    let exists = with_lock!(unique_files_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, (String, PathBuf)>>| {
        Ok(guard.contains_key(&hash_code))
    })?;
    
    if !exists {
        // 这是一个新的唯一文件
        with_lock!(unique_files_map.write(), |mut guard: std::sync::RwLockWriteGuard<HashMap<String, (String, PathBuf)>>| {
            guard.insert(hash_code, (full_path_name, relative_path));
            Ok(())
        })
    } else {
        // 这是一个重复文件
        // 检查是否需要创建新的重复文件条目
        let needs_new_entry = with_lock!(duplicate_files_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, Vec<String>>>| {
            Ok(!guard.contains_key(&hash_code))
        })?;
        
        if needs_new_entry {
            // 为这个哈希码创建一个新的重复文件列表，包含第一个文件的相对路径
            let original_rel_path = with_lock!(unique_files_map.read(), |guard: std::sync::RwLockReadGuard<HashMap<String, (String, PathBuf)>>| {
                match guard.get(&hash_code) {
                    Some((_, rel_path)) => match rel_path.to_str() {
                        Some(s) => Ok(s.to_string()),
                        None => Err(anyhow::anyhow!("Failed to convert path to string")),
                    },
                    None => Err(anyhow::anyhow!("Inconsistency error: unique files map operation failed")),
                }
            })?;
            
            with_lock!(duplicate_files_map.write(), |mut guard: std::sync::RwLockWriteGuard<HashMap<String, Vec<String>>>| {
                guard.insert(hash_code.clone(), vec![original_rel_path]);
                Ok(())
            })?;
        }
        
        // 将当前文件的相对路径添加到重复列表中
        with_lock!(duplicate_files_map.write(), |mut guard: std::sync::RwLockWriteGuard<HashMap<String, Vec<String>>>| {
            if let Some(paths) = guard.get_mut(&hash_code) {
                paths.push(relative_path_str);
                Ok(())
            } else {
                Err(anyhow::anyhow!("Inconsistency error: duplicate files map operation failed"))
            }
        })
    }
}

// Copy the collected unique files into the output folder, preserving their
// computed relative paths. With a thread pool, copies run on the pool and
// copy errors are only logged (see `thread_task!`); the function now joins
// the pool before returning so no copy is left in flight.
fn copy_files_to_output(
    verbose: bool,
    thread_pool: &Option<ThreadPool>,
    unique_files_map: &Arc<RwLock<HashMap<String, (String, PathBuf)>>>,
    out_folder: &Path,
) -> Result<()> {
    // Clone the work list out so the lock is not held while copying.
    let files_to_copy = with_lock!(unique_files_map.read(), 
        |guard: std::sync::RwLockReadGuard<HashMap<String, (String, PathBuf)>>| {
            let mut result: Vec<(String, String, PathBuf)> = Vec::new();
            for (hash, (full_path, rel_path)) in guard.iter() {
                result.push((hash.clone(), full_path.clone(), rel_path.clone()));
            }
            Ok(result)
        })?;

    for (hash, full_path_name, relative_path) in files_to_copy {
        VERBOSE!(verbose, debug!("Copy file [{}]: {} -> {:?}", 
            hash, full_path_name, relative_path));

        let in_path = PathBuf::from(full_path_name);
        let out_path = out_folder.join(relative_path);

        // Make sure the destination directory exists before copying into it.
        if let Some(parent) = out_path.parent() {
            create_dir_all(parent)
                .with_context(|| format!("Failed to create destination directory: {:?}", parent))?;
        }

        match thread_pool {
            Some(pool) => {
                let in_path_clone = in_path.clone();
                let out_path_clone = out_path.clone();
                let verbose_clone = verbose;

                pool.execute(thread_task!(move || -> Result<()> {
                    let in_path_error = in_path_clone.clone();
                    let out_path_debug = out_path_clone.clone();
                    copy(in_path_clone, out_path_clone)
                        .with_context(|| format!("Failed to copy file: {:?}", in_path_error))?;
                    VERBOSE!(verbose_clone, debug!("File copied: {:?}", out_path_debug));
                    Ok(())
                }));
            },
            None => {
                copy(&in_path, &out_path)
                    .with_context(|| format!("Failed to copy file: {:?}", in_path))?;
            }
        }
    }

    // Wait for queued copy jobs before returning; the previous version
    // returned immediately, so callers could observe incomplete copies.
    if let Some(pool) = thread_pool {
        pool.join();
    }

    Ok(())
}

// Compute a file's SHA3-256 digest (tiny-keccak), returned as lowercase hex.
// The file is streamed through the hasher in 64 KiB chunks.
fn hash_file(verbose: bool, file_path: &str) -> Result<String> {
    let mut reader = File::open(file_path)
        .with_context(|| format!("Failed to open file: {}", file_path))?;

    let mut sha3 = Sha3::v256();
    let mut chunk = [0u8; 64 * 1024];

    loop {
        let n = reader.read(&mut chunk)
            .with_context(|| format!("Failed to read file: {}", file_path))?;
        if n == 0 {
            break;
        }
        sha3.update(&chunk[..n]);
    }

    let mut digest = [0u8; 32];
    sha3.finalize(&mut digest);

    let hex_digest = digest.encode_hex::<String>();
    VERBOSE!(verbose, debug!("File {} hash: {}", file_path, hex_digest));
    Ok(hex_digest)
}

// Walk a folder tree, invoking `visitor` on every regular file found.
// Errors out immediately if the starting path is not a directory.
fn walk_folder<F>(in_path: &Path, visitor: &mut F) -> Result<()>
where
    F: FnMut(&Path),
{
    if in_path.is_dir() {
        walk_folder_recursive(in_path, visitor)
    } else {
        Err(anyhow::anyhow!("Path is not a directory: {:?}", in_path))
    }
}

// Depth-first traversal backing walk_folder: files go to the visitor,
// subdirectories recurse; other entry kinds (e.g. symlinks) are skipped.
fn walk_folder_recursive<F>(in_path: &Path, visitor: &mut F) -> Result<()>
where
    F: FnMut(&Path),
{
    let entries = in_path.read_dir()
        .with_context(|| format!("Read directory failed: {:?}", in_path))?;

    for maybe_entry in entries {
        let entry = maybe_entry
            .with_context(|| format!("Process directory entry failed: {:?}", in_path))?;

        let entry_path = entry.path();
        let kind = entry.file_type()
            .with_context(|| format!("Get file type failed: {:?}", entry_path))?;

        if kind.is_file() {
            visitor(&entry_path);
        } else if kind.is_dir() {
            walk_folder_recursive(&entry_path, visitor)
                .with_context(|| format!("Process subfolder failed: {:?}", entry_path))?;
        }
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::write;
    use tempfile::{tempdir, TempDir};

    // Lay out a small input tree: file1 and subdir/file3 share content,
    // file2 is distinct. Returns both temp dirs (kept alive by the caller)
    // plus the created file paths in declaration order.
    fn setup_test_environment() -> Result<(TempDir, TempDir, Vec<PathBuf>)> {
        let in_dir = tempdir()?;
        let out_dir = tempdir()?;

        let fixtures = [
            ("file1.txt", "content1"),
            ("file2.txt", "content2"),
            ("subdir/file3.txt", "content1"), // same content as file1.txt
        ];

        let mut created_paths = Vec::with_capacity(fixtures.len());
        for (rel, body) in fixtures {
            let target = in_dir.path().join(rel);
            if let Some(dir) = target.parent() {
                create_dir_all(dir)?;
            }
            write(&target, body)?;
            created_paths.push(target);
        }

        Ok((in_dir, out_dir, created_paths))
    }

    #[test]
    fn test_hash_file() {
        let (_in_dir, _out_dir, files) = setup_test_environment().unwrap();

        let h1 = hash_file(false, files[0].to_str().unwrap()).unwrap();
        let h2 = hash_file(false, files[1].to_str().unwrap()).unwrap();
        let h3 = hash_file(false, files[2].to_str().unwrap()).unwrap();

        // identical content => identical digest
        assert_eq!(h1, h3, "相同内容的文件应该生成相同的哈希值");
        // different content => different digest
        assert_ne!(h1, h2, "不同内容的文件应该生成不同的哈希值");
    }
}
