# cleaners.py
import psutil
import platform
import os
import json
from datetime import datetime
import hashlib
from concurrent.futures import ThreadPoolExecutor
import threading
import time
from pathlib import Path
import sys

from typing import List

from code.config import CleanerConfig
from typing import Optional
from typing import Callable
from typing import Dict

class BaseCleaner:
    """Base class for cleaners.

    Defines the scan/clean interface and shared infrastructure: logging,
    memory-pressure throttling, pause/stop control and filtered recursive
    directory scanning.
    """

    def __init__(self, config: CleanerConfig):
        self.config = config
        self.scan_results = []                # items found by the last scan
        self.last_memory_check = time.time()  # timestamp of the last memory probe
        self.pause_event = threading.Event()  # set => scanning is paused
        self.stop_event = threading.Event()   # set => scanning must abort
        self.progress = 0                     # current scan progress
        self.scan_active = False              # True while a scan is running
        self.log_callback = None              # optional external log sink
        self.consecutive_high_memory = 0      # consecutive high-memory readings
        self.large_file_threshold = 100 * 1024 * 1024  # files above 100MB count as large

    def set_log_callback(self, log_callback: Callable[[str], None]):
        """Register a callback that receives every log message."""
        self.log_callback = log_callback

    def log_message(self, message: str):
        """Send *message* to the registered callback (if any) and to stdout."""
        if self.log_callback:
            self.log_callback(message)
        print(message)

    def memory_usage(self) -> float:
        """Return current system memory usage in percent (0.0 when unreadable)."""
        try:
            return psutil.virtual_memory().percent
        except Exception:  # psutil unavailable or probe failed
            return 0.0

    def check_memory_threshold(self) -> bool:
        """Pause briefly when memory usage exceeds the configured threshold.

        Probes are rate-limited to at most once per second.  The wait time
        grows with consecutive high readings (capped at 10 seconds).
        Returns True when a pause was triggered.
        """
        current_time = time.time()
        if current_time - self.last_memory_check < 1:  # probe at most once a second
            return False

        self.last_memory_check = current_time
        mem_usage = self.memory_usage()

        if mem_usage > self.config.memory_threshold:
            # Back off longer the more often the threshold is hit in a row.
            wait_time = min(3 + self.consecutive_high_memory, 10)
            self.log_message(f"⚠️ 内存使用过高 ({mem_usage}%), 暂停扫描 {wait_time} 秒...")
            time.sleep(wait_time)
            self.consecutive_high_memory += 1
            return True
        else:
            self.consecutive_high_memory = max(0, self.consecutive_high_memory - 1)
            return False

    def _recursive_scan(self, root_dir: str, progress_callback=None) -> List[str]:
        """Recursively collect scannable files under *root_dir*.

        Honours directory include/exclude rules, extension exclusion,
        Windows reserved device names, the large-file threshold and the
        memory guard.  Returns a list of absolute file paths.
        """
        allowed_files = []
        self.log_message(f"开始扫描根目录: {root_dir}")

        # Windows reserved device names (cannot be ordinary files/dirs).
        windows_reserved_names = {
            'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'COM2', 'COM3', 'COM4',
            'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT1', 'LPT2', 'LPT3',
            'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
        }

        # Normalise the root to an absolute path.
        root_dir = os.path.abspath(root_dir)

        for current_dir, sub_dirs, files in os.walk(root_dir):
            # Periodic memory guard.
            if self.check_memory_threshold():
                self.log_message("内存保护机制激活，暂停后继续扫描...")

            # Skip explicitly excluded directories and their whole subtree.
            if self.config.is_dir_excluded(current_dir):
                self.log_message(f"跳过排除目录: {current_dir}")
                sub_dirs[:] = []
                continue

            # Skip directories that do not match the allow filter.
            if not self.config.is_dir_allowed(current_dir):
                self.log_message(f"跳过目录: {current_dir} (不符合过滤条件)")
                continue

            # Skip Windows reserved device directories entirely.
            if self.config.system_type == 'windows':
                try:
                    dir_name = os.path.basename(current_dir).upper()
                    if dir_name in windows_reserved_names:
                        self.log_message(f"跳过 Windows 保留目录: {current_dir}")
                        sub_dirs[:] = []
                        continue
                except Exception:
                    pass

            # Process files in the current directory.
            for file in files:
                if self.config.system_type == 'windows':
                    file_name_upper = file.upper()
                    # A name is reserved when it equals a device name or is a
                    # device name plus an extension ("CON.TXT").  The previous
                    # startswith() test wrongly skipped names like "CONFIG.SYS".
                    if file_name_upper in windows_reserved_names or \
                            file_name_upper.split('.', 1)[0] in windows_reserved_names:
                        self.log_message(f"跳过 Windows 保留文件: {file}")
                        continue
                file_path = os.path.join(current_dir, file)
                try:
                    abs_path = os.path.abspath(file_path)  # canonical path

                    # Extension-based exclusion.
                    if self.config.is_extension_excluded(abs_path):
                        continue

                    # Only keep existing, readable files.
                    if os.path.exists(abs_path) and os.access(abs_path, os.R_OK):
                        # Skip files above the large-file threshold.
                        try:
                            file_size = os.path.getsize(abs_path)
                            if file_size > self.large_file_threshold:
                                self.log_message(f"⚠️ 跳过大文件: {abs_path} ({file_size/1024/1024:.2f} MB)")
                                continue
                        except OSError:
                            pass

                        allowed_files.append(abs_path)
                        if progress_callback:
                            self._update_progress(progress_callback)
                    # Re-run the memory guard every 100 collected files.
                    if len(allowed_files) % 100 == 0 and self.check_memory_threshold():
                        self.log_message("内存保护机制激活，暂停后继续扫描...")
                except OSError as e:
                    self.log_message(f"⚠️ 文件访问错误跳过: {file_path} ({str(e)})")
                    continue  # move on to the next file

            # Prune subdirectories according to the recursion setting.
            try:
                if self.config.recursive:
                    # Keep only subdirectories that pass the filters.
                    sub_dirs[:] = [d for d in sub_dirs
                                   if self.config.is_dir_allowed(os.path.join(current_dir, d)) and
                                   (self.config.system_type != 'windows' or
                                    d.upper() not in windows_reserved_names)]
                else:
                    # Non-recursive mode: do not descend at all.
                    sub_dirs[:] = []
            except OSError as e:
                self.log_message(f"⚠️ 目录处理错误跳过: {current_dir} ({str(e)})")

        return allowed_files

    def _update_progress(self, progress_callback):
        """Report one processed file to *progress_callback*."""
        if progress_callback:
            progress_callback(1)  # one unit per processed file

    def _check_pause(self):
        """Block while the pause event is set (unless stopping)."""
        while self.pause_event.is_set() and not self.stop_event.is_set():
            self.log_message("扫描暂停中...")
            time.sleep(1)

    def pause_scan(self):
        """Pause the current scan."""
        self.pause_event.set()

    def resume_scan(self):
        """Resume a paused scan."""
        self.pause_event.clear()

    def stop(self):
        """Abort the current scan."""
        self.stop_event.set()
        self.scan_active = False

    def scan(self, progress_callback: Optional[Callable[[int], None]] = None) -> List[str]:
        """Run a scan and return the list of cleanable items."""
        raise NotImplementedError("子类必须实现scan方法")

    def clean(self) -> int:
        """Perform the clean-up and return the number of removed items."""
        raise NotImplementedError("子类必须实现clean方法")

    def save_scan_results(self, scan_type: str) -> dict:
        """Persist detailed scan results to scan_results/<type>_<timestamp>.json.

        duplicate_groups/duplicate_folders are only defined by duplicate-cleaner
        subclasses, so they are read with getattr/hasattr fallbacks instead of
        raising AttributeError on other cleaners.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        os.makedirs('scan_results', exist_ok=True)
        filename = f"scan_results/{scan_type}_{timestamp}.json"

        # Group data: one entry per duplicate-file group.
        groups = []
        for file_hash, files in getattr(self, 'duplicate_groups', {}).items():
            # Normalise path formatting while keeping the original paths
            # (no conversion to absolute paths).
            normalized_files = [str(Path(f)) for f in files]
            try:
                groups.append({
                    "hash": file_hash,
                    "files": normalized_files,
                    "count": len(files),
                    "total_size": sum(os.path.getsize(f) for f in files if os.path.exists(f))
                })
            except Exception as e:
                self.log_message(f"保存扫描结果时出错: {e}")

        # Folder group data.
        folder_groups = []
        if hasattr(self, 'duplicate_folders'):
            for group_id, group_info in self.duplicate_folders.items():
                try:
                    # Normalise folder path formatting (original paths kept).
                    normalized_folders = [str(Path(folder)) for folder in group_info["folders"]]

                    folder_groups.append({
                        "group_id": group_id,  # unique group identifier
                        "folders": normalized_folders,
                        "count": len(group_info["folders"]),
                        "total_size": group_info["total_size"],
                        "signature": group_info.get("signature", "")
                    })
                except Exception as e:
                    self.log_message(f"保存文件夹扫描结果时出错: {e}")

        results = {
            'timestamp': timestamp,
            'scan_type': scan_type,
            'system': self.config.system_type,
            'target_dirs': self.config.target_dirs,
            'groups': groups,                # duplicate-file groups
            'folder_groups': folder_groups,  # duplicate-folder groups
            'clean_list': self.scan_results  # final clean list
        }

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)  # keep non-ASCII readable

        return results

class SmartDuplicateCleaner(BaseCleaner):
    """智能重复文件清理器，支持基于哈希的文件比较和文件夹扫描"""
    
    def __init__(self, config: CleanerConfig):
        """Set up duplicate-scan state on top of the base cleaner."""
        super().__init__(config)
        self.duplicate_groups = {}   # content hash -> list of duplicate files
        self.duplicate_folders = {}  # group id -> duplicate-folder info
        self.cache_dir = os.path.join(self.config.get_cache_dir(), ".cleaner_cache")
        self.last_memory_check = time.time()
        # Stricter large-file cut-off than the base class (50MB instead of 100MB).
        self.large_file_threshold = 50 * 1024 * 1024
        # Optional path to a pre-computed duplicate-file result file.
        self.duplicate_result_file = None
        
    def scan(self, progress_callback: Optional[Callable[[int], None]] = None) -> List[str]:
        """Run the duplicate file / folder scan and return the clean list.

        Supports pause/stop events, resume-from-state files in the cache
        directory, and an optional pre-computed duplicate-file result file
        (which forces folder-only scanning).  The result is also stored in
        ``self.scan_results``.
        """
        self.log_message("🔍 开始扫描重复项...")
        self.stop_event.clear()
        self.scan_active = True

        # Cache directory holds the resume-state files.
        os.makedirs(self.cache_dir, exist_ok=True)

        # Fresh pause event for this run.
        self.pause_event = threading.Event()

        # Log the effective configuration.
        self.log_message(f"扫描目录: {self.config.target_dirs}, 递归: {self.config.recursive}")
        self.log_message(f"文件夹检测: {'开启' if self.config.check_folders else '关闭'}")
        self.log_message(f"内存阈值: {self.config.memory_threshold}%")
        self.log_message(f"大文件阈值: {self.large_file_threshold/1024/1024:.0f} MB")

        # Total file count drives progress reporting.
        total_files = self._count_total_files()
        self.log_message(f"预计扫描文件总数: {total_files}")

        # Rough time estimate (~50k files per minute).
        estimated_minutes = max(1, total_files // 50000)
        self.log_message(f"预计扫描时间: {estimated_minutes} 分钟")

        processed_files = 0

        # A pre-computed duplicate-file result forces folder-only scanning.
        if self.duplicate_result_file:
            self.log_message(f"从文件加载重复文件结果: {self.duplicate_result_file}")
            self._load_duplicate_groups_from_file(self.duplicate_result_file)
            self.config.check_folders = True
            self.log_message("已加载重复文件结果，将只扫描重复文件夹")

        size_map = {}  # size -> [paths]; always defined

        state_file = os.path.join(self.cache_dir, "size_map_state.json")
        if not os.path.exists(state_file):
            os.makedirs(os.path.dirname(state_file), exist_ok=True)
        # Guarantee resume_data is never None.
        resume_data = self._load_resume_state(state_file) or {}

        # Files are only scanned when folder detection is off and no
        # pre-computed duplicate-file result was supplied.
        if not self.config.check_folders and not self.duplicate_result_file:
            if not resume_data.get('size_map_complete', False):
                # File scan: group candidates by size.
                size_map = self._group_files_by_size(
                    progress_callback,
                    total_files,
                    processed_files,
                    state_file
                )
                if self.stop_event.is_set():
                    self.log_message("扫描已取消")
                    return []

                self.log_message(f"按大小分组完成，发现 {len(size_map)} 组可能重复的文件")
                # Checkpoint the completed phase.
                resume_data['size_map_complete'] = True
                self._save_resume_state(resume_data, state_file)
            else:
                # Restore from the state file.  JSON stringifies int keys,
                # so convert them back to int to keep size comparisons valid.
                size_map = {int(k): v for k, v in resume_data.get('size_map', {}).items()}
                self.log_message(f"恢复大小分组结果，组数: {len(size_map)}")

            # Honour pause requests between phases.
            self._check_pause()
            self.log_message(f"🔍 扫描目标目录: {self.config.target_dirs}")

            hash_state_file = os.path.join(self.cache_dir, "hash_groups_state.json")
            hash_resume_data = self._load_resume_state(hash_state_file) or {}

            if not hash_resume_data.get('hash_groups_complete', False):
                self.duplicate_groups = self._find_duplicate_hashes(size_map, progress_callback, hash_state_file)
                if self.stop_event.is_set():
                    self.log_message("扫描已取消")
                    return []

                # Checkpoint the completed phase.
                hash_resume_data['hash_groups_complete'] = True
                self._save_resume_state(hash_resume_data, hash_state_file)
        else:
            self.log_message("跳过文件扫描（启用了文件夹检测或已提供重复文件结果）")

        # Folder duplicate detection.
        if self.config.check_folders or self.duplicate_result_file:
            self.log_message("开始文件夹重复检测...")
            self._check_pause()
            self._scan_duplicate_folders(progress_callback)
            # Guarantee duplicate_folders is never None.
            self.duplicate_folders = self.duplicate_folders or {}
            self.log_message(f"发现 {len(self.duplicate_folders)} 组重复文件夹")

        self._save_scan_log()
        self.scan_results = self._generate_clean_list()

        # Scan finished: drop all resume-state files.
        for f in os.listdir(self.cache_dir):
            path = os.path.join(self.cache_dir, f)
            if os.path.isfile(path):
                try:
                    os.remove(path)
                except OSError:
                    pass  # best effort; stale state is harmless

        self.log_message(f"扫描完成，发现重复文件组: {len(self.duplicate_groups)}")

        # Make sure the progress bar ends at 100%.
        if progress_callback:
            progress_callback(100)

        self.scan_active = False
        return self.scan_results
    
    def _load_resume_state(self, state_file: str) -> dict:
        """Load a resume-state dict from *state_file*; {} when absent or corrupt."""
        if os.path.exists(state_file):
            try:
                with open(state_file, 'r') as f:
                    return json.load(f)
            # ValueError covers json.JSONDecodeError (its subclass); the
            # original bare except also swallowed KeyboardInterrupt etc.
            except (OSError, ValueError):
                self.log_message(f"加载状态文件失败: {state_file}")
                return {}
        self.log_message(f"状态文件不存在: {state_file}")
        return {}
    
    def _save_resume_state(self, data: dict, state_file: str):
        """Persist the resume-state dict *data* as JSON in *state_file*.

        Failures are logged but never raised, so scanning continues even
        when the checkpoint cannot be written.
        """
        try:
            with open(state_file, 'w') as fh:
                json.dump(data, fh)
        except Exception as e:
            self.log_message(f"保存状态失败: {str(e)}")
    
    def _count_total_files(self) -> int:
        """Count files under every target directory (for progress estimates)."""
        count = 0
        for dir_path in self.config.target_dirs:
            try:
                for _, _, files in os.walk(dir_path):
                    count += len(files)
                    # Memory guard roughly every 1000 files.  The original
                    # checked once per target directory *before* walking, so
                    # the guard never fired while counting.
                    if count and count % 1000 == 0 and self.check_memory_threshold():
                        self.log_message("内存保护机制激活，暂停后继续计数...")
            except Exception as e:
                import traceback
                self.log_message(f"目录遍历错误详情: {traceback.format_exc()}")
                self.log_message(f"无法遍历目录 {dir_path}: {e}")
        return count
    
    def _group_files_by_size(self, progress_callback, total_files, processed_files, state_file: str) -> Dict[int, List[str]]:
        """Group candidate files by size; only sizes with more than one file survive.

        ``processed_files`` is accepted for interface compatibility but the
        counter is re-initialised locally.  Pause, memory-guard and progress
        hooks run while walking; intermediate state is checkpointed to
        *state_file* every 1000 files.
        """
        size_map = {}
        processed_files = 0

        for dir_path in self.config.target_dirs:
            allowed_files = self._recursive_scan(dir_path, progress_callback)
            for file_path in allowed_files:
                # Honour pause requests.
                if self.pause_event.is_set():
                    self._check_pause()

                # Periodic memory guard.
                if processed_files % 100 == 0 and self.check_memory_threshold():
                    self.log_message("内存保护机制激活，暂停后继续分组...")

                try:
                    # Extension exclusion (cheap, repeated defensively even
                    # though _recursive_scan already applies it).
                    if self.config.is_extension_excluded(file_path):
                        continue

                    abs_path = os.path.abspath(file_path)
                    # Only group existing, readable, non-empty files.
                    if os.path.exists(abs_path) and os.access(abs_path, os.R_OK):
                        # Use the validated abs_path (the original queried
                        # the raw file_path after validating abs_path).
                        size = os.path.getsize(abs_path)
                        if size == 0:
                            continue
                        size_map.setdefault(size, []).append(file_path)
                except (PermissionError, FileNotFoundError, OSError) as e:
                    self.log_message(f"无法处理文件 {file_path}: {e}")
                finally:
                    processed_files += 1
                    # Progress update based on the global file estimate.
                    if progress_callback and total_files > 0:
                        progress_callback(min(100, processed_files / total_files * 100))

                # Checkpoint every 1000 processed files.
                if processed_files % 1000 == 0:
                    self._save_resume_state(
                        {'size_map': size_map, 'processed_files': processed_files},
                        state_file)
                    self.log_message(f"保存文件大小分组状态，已处理: {processed_files}")

        return {size: files for size, files in size_map.items() if len(files) > 1}
    
    def _find_duplicate_hashes(self, size_map: dict, progress_callback=None, state_file: str = None) -> dict:
        """Hash same-size files in parallel; return hash -> [paths] for real duplicates.

        Files whose hash cannot be computed return "" from
        ``_calculate_file_hash``; those are skipped so that two unreadable
        files are never reported as duplicates of each other (the original
        grouped all failures under the "" key).
        """
        hash_map = {}
        total_groups = len(size_map)
        processed_groups = 0

        # Hash files concurrently in a small thread pool.
        with ThreadPoolExecutor(max_workers=4) as executor:
            future_to_file = {}

            # Submit one hashing task per candidate file.
            for size, files in size_map.items():
                if self.stop_event.is_set():
                    return {}

                # Honour pause requests.
                if self.pause_event.is_set():
                    self._check_pause()

                # Memory guard before each size group.
                if self.check_memory_threshold():
                    self.log_message("内存保护机制激活，暂停后继续哈希计算...")

                for file_path in files:
                    # Never hash files above 500MB.
                    if size > 500 * 1024 * 1024:
                        self.log_message(f"⚠️ 跳过超大文件哈希计算: {file_path} ({size/1024/1024:.2f} MB)")
                        continue

                    future = executor.submit(self._calculate_file_hash, file_path)
                    future_to_file[future] = file_path

                # Progress update per size group.
                processed_groups += 1
                if progress_callback and total_groups > 0:
                    progress_callback(processed_groups / total_groups * 100)

                # Checkpoint every 10 groups.
                if processed_groups % 10 == 0:
                    self._save_resume_state(
                        {'hash_map': hash_map, 'processed_groups': processed_groups},
                        state_file)
                    self.log_message(f"保存哈希计算状态，已处理: {processed_groups}/{total_groups}")

            # Collect results.
            for future in future_to_file:
                if self.stop_event.is_set():
                    return {}

                try:
                    if self.pause_event.is_set():
                        self._check_pause()

                    file_hash = future.result()
                    file_path = future_to_file[future]
                    # "" marks a failed hash — do not form groups from it.
                    if not file_hash:
                        continue
                    hash_map.setdefault(file_hash, []).append(file_path)
                except Exception as e:
                    self.log_message(f"无法计算文件哈希: {e}")

        # Keep only hashes shared by more than one file.
        return {file_hash: files for file_hash, files in hash_map.items() if len(files) > 1}
    
    def _calculate_file_hash(self, file_path: str, block_size=65536) -> str:
        """Return the content hash of *file_path*, or "" on any failure.

        In fast mode the digest is derived from file metadata (size, mtime,
        basename) instead of the content.  Content hashing retries transient
        OSErrors up to three times.  An empty string is the universal failure
        sentinel — callers must not treat it as a real hash.
        """
        if not os.path.exists(file_path):  # vanished since scanning
            self.log_message(f"⚠️ 文件不存在: {file_path}")
            return ""
        if self.stop_event.is_set():
            return ""

        # Unreadable files cannot be hashed.
        if not os.access(file_path, os.R_OK):
            self.log_message(f"⚠️ 跳过无法访问的文件: {file_path}")
            return ""

        # Browser cache files churn constantly; skip them.
        if "Cache_Data" in file_path or "Cache" in file_path:
            self.log_message(f"⚠️ 跳过浏览器缓存文件: {file_path}")
            return ""

        # Fast mode: hash file attributes (size, mtime, name) only.
        if self.config.fast_mode:
            try:
                stat = os.stat(file_path)
                file_info = f"{stat.st_size}_{stat.st_mtime}_{os.path.basename(file_path)}"
                hasher = hashlib.md5()
                hasher.update(file_info.encode('utf-8'))
                return hasher.hexdigest()
            except Exception as e:
                self.log_message(f"无法计算文件属性哈希: {file_path} - {str(e)}")
                return ""

        # Pick the configured algorithm; default to md5 for unknown names.
        factories = {
            'md5': hashlib.md5,
            'sha1': hashlib.sha1,
            'sha256': hashlib.sha256,
            'blake2': hashlib.blake2b,
        }
        make_hasher = factories.get(self.config.hash_algorithm, hashlib.md5)
        try:
            for attempt in range(3):  # retry transient read errors
                try:
                    hasher = make_hasher()
                    with open(file_path, 'rb') as f:
                        while buf := f.read(block_size):
                            hasher.update(buf)
                    return hasher.hexdigest()
                except OSError:
                    if attempt == 2:
                        raise
            # Unreachable (success returns, third failure raises), but kept so
            # every path returns a string.  The original returned
            # hasher.hexdigest() here, i.e. the digest of a partial read.
            return ""
        except (PermissionError, FileNotFoundError, OSError) as e:
            self.log_message(f"无法计算文件 {file_path} 的哈希值: {e}")
            return ""
        
    # ---- Folder-duplicate detection helpers ----

    def collect_dirs_and_depths(self, root_dir):
        """Walk *root_dir* bottom-up and return ``(dirs, depths, is_empty)``.

        ``dirs`` lists every directory (root first), ``depths`` maps each
        directory to its depth below the root (root = 0), and ``is_empty``
        flags directories that contain neither files nor subdirectories.
        """
        root_dir = os.path.abspath(root_dir)
        collected = [root_dir]
        depth_of = {root_dir: 0}
        empty_flags = {}

        # Bottom-up traversal so children are visited before their parents.
        for current, child_dirs, child_files in os.walk(root_dir, topdown=False):
            path = os.path.abspath(current)

            # Depth = number of separators in the relative path, plus one.
            rel = os.path.relpath(path, root_dir)
            depth_of[path] = 0 if rel == '.' else rel.count(os.sep) + 1

            # Empty means no files and no subdirectories at all.
            empty_flags[path] = not child_files and not child_dirs

            # The root was seeded above; add everything else.
            if path != root_dir:
                collected.append(path)

        return collected, depth_of, empty_flags
    
    def calculate_file_hash(self, file_path):
        """MD5 digest of the file's content.

        Returns "0" when *file_path* is not a regular file and "error" when
        hashing fails.  In fast mode the digest is computed from size, mtime
        and basename rather than from the content.
        """
        if not os.path.isfile(file_path):
            return "0"

        if self.config.fast_mode:
            # Attribute-based digest: cheap, content-approximate.
            try:
                meta = os.stat(file_path)
                descriptor = f"{meta.st_size}_{meta.st_mtime}_{os.path.basename(file_path)}"
                return hashlib.md5(descriptor.encode('utf-8')).hexdigest()
            except Exception:
                return "error"

        digest = hashlib.md5()
        try:
            with open(file_path, 'rb') as fh:
                for chunk in iter(lambda: fh.read(8192), b''):
                    digest.update(chunk)
            return digest.hexdigest()
        except Exception:
            return "error"
    

    def calculate_dir_hash(self, dir_path, hashes, depths, is_empty):
        """Digest of a directory based on its files and already-hashed subdirs.

        Empty directories yield the sentinel "empty_directory"; unreadable
        ones yield "permission_error" or "error".  File hashes are taken from
        ``self.file_hash_cache`` when available and cached after computation.
        ``depths`` is accepted for interface symmetry but not used here.
        """
        # Pre-marked empty directories short-circuit immediately.
        if is_empty.get(dir_path, False):
            return "empty_directory"

        try:
            entries = os.listdir(dir_path)
        except PermissionError:
            return "permission_error"
        except Exception:
            return "error"

        file_parts = []
        subdir_parts = []
        sentinels = ("empty_directory", "permission_error", "error")

        for entry in entries:
            child = os.path.join(dir_path, entry)
            # Normalised key used by the file-hash cache.
            child_key = os.path.normcase(os.path.abspath(child))

            if os.path.isfile(child):
                if child_key in self.file_hash_cache:
                    # Reuse the cached hash; never include failed hashes.
                    cached = self.file_hash_cache[child_key]
                    if cached != "error":
                        file_parts.append(f"{entry}:{cached}")
                else:
                    # Only hash files the cache does not know about.
                    fresh = self.calculate_file_hash(child)
                    if fresh != "error":
                        file_parts.append(f"{entry}:{fresh}")
                        self.file_hash_cache[child_key] = fresh
            elif os.path.isdir(child):
                # Include only subdirectories with a real (non-sentinel) hash.
                if child in hashes and hashes[child] not in sentinels:
                    subdir_parts.append(f"{entry}:{hashes[child]}")

        # Nothing hashable at all => effectively empty.
        if not file_parts and not subdir_parts:
            return "empty_directory"

        # Deterministic ordering so identical contents hash identically.
        file_parts.sort()
        subdir_parts.sort()
        combined = "|".join(file_parts + subdir_parts)
        return hashlib.md5(combined.encode()).hexdigest()


    def _scan_duplicate_folders(self, progress_callback=None):
        """扫描重复文件夹（增强版，包含大小统计）"""
        self.log_message("🔍 开始扫描重复文件夹...")

        # 从重复文件扫描结果构建文件哈希缓存
        self.file_hash_cache = {}
        for file_hash, files in self.duplicate_groups.items():
            for file_path in files:                
                self.file_hash_cache[os.path.normcase(os.path.abspath(file_path))] = file_hash
        self.duplicate_groups.clear()
        # 新增：计算文件夹大小的工具函数
        def get_folder_size(folder_path):
            """计算文件夹总大小（字节）"""
            total_size = 0
            try:
                for root, _, files in os.walk(folder_path):
                    for file in files:
                        file_path = os.path.join(root, file)
                        # 跳过无法访问的文件
                        if not os.access(file_path, os.R_OK):
                            continue
                        try:
                            total_size += os.path.getsize(file_path)
                        except (PermissionError, OSError) as e:
                            self.log_message(f"无法获取文件大小: {file_path}, 错误: {e}")
            except (PermissionError, OSError) as e:
                self.log_message(f"无法访问文件夹: {folder_path}, 错误: {e}")
            return total_size

        # 初始化集合收集所有目录
        all_dirs = set()
        depths = {}
        is_empty = {}
        folder_sizes = {}  # 新增：缓存文件夹大小，避免重复计算
        # 遍历所有目标目录
        for target_dir in self.config.target_dirs:
            try:
                # 收集单个目标目录的信息
                dirs_in_target, depths_in_target, is_empty_in_target = self.collect_dirs_and_depths(target_dir)
                # 合并结果
                all_dirs.update(dirs_in_target)
                depths.update(depths_in_target)
                is_empty.update(is_empty_in_target)
                
                # 提前计算并缓存文件夹大小
                for dir_path in dirs_in_target:
                    if dir_path not in folder_sizes:
                        folder_sizes[dir_path] = get_folder_size(dir_path)
                    
            except Exception as e:
                error_msg = f"处理目录 {target_dir} 时出错: {e}"
                self.log_message(error_msg)
        # 转换为列表并按深度排序
        dirs = list(all_dirs)
        dirs.sort(key=lambda d: depths[d], reverse=True)
        hashes = {}
        duplicate_groups = []  # 存储(深度, [目录列表])
        hash_to_dirs = {}
        
        total = len(dirs)
        for i, dir_path in enumerate(dirs):
            # 更新进度（如果有回调）
            if progress_callback:
                progress = min(99, (i / total) * 100)  # 防止超过100%
                progress_callback(progress)
                
            # 检查暂停状态
            if self.pause_event.is_set():
                self._check_pause()
                
            # 内存保护检查（每50个目录检查一次）
            if self.check_memory_threshold():
                self.log_message("内存保护机制激活，暂停后继续文件夹扫描...")
                # 更新进度回调（如果有）
                if progress_callback:
                    progress_callback(min(99, (i / total) * 100))
                continue                
            
            # 检查目录是否排除
            if self.config.is_dir_excluded(dir_path):
                self.log_message(f"跳过排除目录: {dir_path}")
                continue
                
            # 计算目录哈希
            hashes[dir_path] = self.calculate_dir_hash(dir_path, hashes, depths, is_empty)
            
            # 更新进度（额外步进）
            if progress_callback:
                progress = min(99, ((i + 0.5) / total) * 100)
                progress_callback(progress)
            h = hashes[dir_path]
            
            # 跳过空目录、权限错误和一般错误
            if h in ("empty_directory", "permission_error", "error"):
                continue

            # 更新进度（完成该目录）
            if progress_callback:
                progress = min(99, ((i + 1) / total) * 100)
                progress_callback(progress)
                
            if h not in hash_to_dirs:
                hash_to_dirs[h] = []
            hash_to_dirs[h].append(dir_path)
        
        # 按哈希分组，筛选重复组（至少2个非空目录）
        for h, paths in hash_to_dirs.items():
            if len(paths) >= 2:
                depth = depths[paths[0]]
                duplicate_groups.append((depth, paths))
        
        # 标记被覆盖的目录
        covered_dirs = set()
        duplicate_groups.sort(key=lambda x: x[0])  # 按深度升序
        for depth, dirs_list in duplicate_groups:
            for dir_path in dirs_list:
                for root, dirs, _ in os.walk(dir_path):
                    for d in dirs:
                        covered_dirs.add(os.path.abspath(os.path.join(root, d)))
        
        # 过滤最终结果，排除空目录和被覆盖的目录
        final_groups = []
        for depth, dirs_list in duplicate_groups:
            # 过滤掉空目录和被覆盖的目录
            filtered = [
                d for d in dirs_list 
                if not is_empty.get(d, True) and d not in covered_dirs
            ]
            if len(filtered) >= 2:
                final_groups.append(filtered)
        # 将列表转换为字典格式，使用索引作为group_id，同时计算大小
        self.duplicate_folders = {}
        for i, group in enumerate(final_groups):
            # 计算组内所有文件夹的总大小（实际可释放空间为单个文件夹大小 * (数量-1)）
            if group:  # 确保组不为空
                # 取第一个文件夹的大小作为基准（重复文件夹大小应相同）
                base_size = folder_sizes.get(group[0], 0)
                # 可释放空间 = 单个文件夹大小 * (重复数量 - 1)
                total_releasable_size = base_size * (len(group) - 1)
            else:
                total_releasable_size = 0
                
            self.duplicate_folders[i] = {
                "folders": group,
                "total_size": total_releasable_size,  # 填充实际计算的大小
                "signature": hashes.get(group[0], "") if group else ""
            }
        
        self.log_message(f"完成重复文件夹扫描，共发现 {len(self.duplicate_folders)} 组重复文件夹")  
    
    def _load_duplicate_groups_from_file(self, file_path: str):
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
                
            # 重置重复文件组
            self.duplicate_groups = {}
            
            # 加载文件分组
            for group in data.get('groups', []):
                file_hash = group.get('hash', '')
                if file_hash:
                    self.duplicate_groups[file_hash] = group.get('files', [])
            
            self.log_message(f"成功加载 {len(self.duplicate_groups)} 组重复文件")
            
            # 从重复文件中提取所有目录（包括父目录）
            all_dirs = set()
            for files in self.duplicate_groups.values():
                for file_path in files:
                    # 获取文件所在目录及其所有父目录
                    current_dir = os.path.dirname(file_path)
                    # 只添加最底层的父目录（文件所在的直接目录）
                    all_dirs.add(current_dir)
            
            # 递归合并目录到最深的共同父目录
            merged_dirs = self.recursive_merge_dirs(all_dirs)
            
            self.config.target_dirs.extend(list(merged_dirs))
            self.log_message(f"添加了 {len(merged_dirs)} 个合并后的目录用于文件夹扫描:")
            for i, dir_path in enumerate(merged_dirs, 1):
                self.log_message(f"{i}. {dir_path}")            
            
        except Exception as e:
            error_msg = f"加载重复文件结果失败: {str(e)}"
            self.log_message(error_msg)

    def find_common_path(self, paths: List[str]) -> str:
        """Find the longest common parent path of *paths* (string variant).

        NOTE(review): this definition is shadowed at class-creation time by
        the later ``find_common_path(self, paths: List[Path])`` declared
        further down in this class, so it is effectively dead code — callers
        always get the Path-based variant. Kept byte-identical pending a
        decision on which implementation to retain.
        """
        if not paths:
            return ""
        
        # Convert the incoming strings to Path objects.
        path_objs = [Path(p) for p in paths]
        
        # Candidate common path; backed up toward the root whenever a path
        # is not contained in it.
        common_path = path_objs[0]
        for path in path_objs[1:]:
            # relative_to raises ValueError when `path` is outside `common_path`.
            try:
                path.relative_to(common_path)
            except ValueError:
                # Climb parents until the path fits; the loop stops at the
                # filesystem root (whose parent is itself).
                while common_path != common_path.parent:
                    try:
                        path.relative_to(common_path)
                        break
                    except ValueError:
                        common_path = common_path.parent
                        
        # Ensure the common prefix is an existing directory.
        # NOTE(review): is_dir() touches the filesystem, so a non-existent
        # common prefix is always replaced by its parent here — confirm that
        # is the intended behavior.
        if not common_path.is_dir():
            # Not a full directory path: back off one level.
            common_path = common_path.parent
            
        return str(common_path)

    def recursive_merge_dirs(self, dirs: set) -> set:
        """Merge a set of directory paths up to their deepest common parent.

        Returns a one-element set holding the common parent when one exists
        below the filesystem root; otherwise returns *dirs* unchanged.

        Fix: the original fallback re-invoked ``find_common_path`` with plain
        strings, but the surviving (Path-based) implementation calls
        ``paths[0].parent`` and would crash with AttributeError on ``str``.
        Sets with no meaningful common parent are now returned as-is instead
        of being collapsed into the root.
        """
        # Zero or one directory: nothing to merge.
        if len(dirs) <= 1:
            return dirs

        # Normalize everything to absolute Path objects.
        dir_paths = [Path(d).resolve() for d in dirs]

        # Deepest common ancestor of all paths.
        common_path = self.find_common_path(dir_paths)
        if common_path and common_path != Path("/"):
            return {str(common_path)}

        # No common parent below the root: keep the original directories
        # rather than merging everything into "/".
        return dirs
    
    def find_common_path(self, paths: List[Path]) -> Optional[Path]:
        """Return the deepest directory that is an ancestor of every path.

        Starts from the parent of the first path and climbs toward the root
        whenever a subsequent path is not contained in the current candidate.
        Returns ``None`` for an empty input.

        Note: this definition overrides the earlier string-based
        ``find_common_path`` declared above in the same class.

        Fix: removed the unused local ``relative`` (the ``relative_to`` call
        is only needed for its ValueError side effect).
        """
        if not paths:
            return None

        # Candidate common ancestor, initialised to the first path's parent.
        common_path = paths[0].parent
        for path in paths[1:]:
            try:
                # Containment check: raises ValueError when `path` is not
                # located under `common_path`.
                path.relative_to(common_path)
            except ValueError:
                # Climb toward the root until the path fits; the loop stops
                # at the root, whose parent is itself.
                while common_path != common_path.parent:
                    try:
                        path.relative_to(common_path)
                        break
                    except ValueError:
                        common_path = common_path.parent
        return common_path
    
    def _calculate_folder_signature(self, dir_path):
        """增强版文件夹签名算法，考虑整个文件夹结构"""
        try:
            # 获取目录下所有文件和子目录的相对路径和大小
            all_items = []
            for root, dirs, files in os.walk(dir_path):
                rel_path = os.path.relpath(root, dir_path)
                if rel_path == ".":
                    rel_path = ""
                
                # 添加文件信息
                for file in files:
                    file_path = os.path.join(root, file)
                    try:
                        size = os.path.getsize(file_path)
                        all_items.append(f"F:{rel_path}/{file}:{size}")
                    except:
                        continue
                
                # 添加目录信息（只记录目录名）
                for dir_name in dirs:
                    all_items.append(f"D:{rel_path}/{dir_name}")
            
            # 如果没有内容，返回None
            if not all_items:
                return None
            
            # 排序并生成签名
            all_items.sort()
            signature_str = ";".join(all_items)
            return hashlib.md5(signature_str.encode('utf-8')).hexdigest()
        except Exception as e:
            self.log_message(f"计算文件夹签名失败: {dir_path}, {str(e)}")
            return None
    
    def _find_outer_duplicate(self, current_dir, existing_dir, duplicate_folders):
        """
        查找最外层重复目录
        1. 检查当前目录是否已经是已知重复组的子目录
        2. 如果不是，检查父目录是否与现有目录的父目录相同
        """
        # 检查当前目录是否已经是某个重复组的子目录
        for outer_dir, folders in duplicate_folders.items():
            if any(current_dir.startswith(f) for f in folders):
                return None  # 已经是子目录，不重复添加
        
        # 检查当前目录的父目录是否与现有目录的父目录相同
        current_parent = os.path.dirname(current_dir)
        existing_parent = os.path.dirname(existing_dir)
        
        if current_parent == existing_parent:
            # 在同一父目录下，直接使用父目录作为最外层
            return current_parent
        
        # 检查父目录是否重复
        parent_signature = self._calculate_folder_signature(current_parent)
        if parent_signature:
            existing_parent_signature = self._calculate_folder_signature(existing_parent)
            if parent_signature == existing_parent_signature:
                # 父目录也重复，递归查找最外层
                return self._find_outer_duplicate(current_parent, existing_parent, duplicate_folders)
        
        # 没有找到更外层的重复，使用当前目录作为最外层
        return current_dir

    def _save_scan_log(self):
        """Persist the scan log (placeholder).

        Intentionally a no-op in this version; a real implementation should
        write the scan log to disk.
        """
        pass  # Simplified implementation; should actually save the scan log
    
    def _generate_clean_list(self) -> List[str]:
        """生成可清理的项目列表（保留最新的文件/文件夹）"""
        clean_list = []
        
        # 当提供了重复文件结果时，跳过文件清理列表的生成
        if not self.duplicate_result_file:
            # 添加重复文件（保留最新的一个）
            for files in self.duplicate_groups.values():
                # 按修改时间排序，最新的文件排在前面
                try: 
                    files.sort(key=lambda x: os.path.getmtime(x), reverse=True)
                    # 保留第一个（最新的），其余添加到清理列表
                    clean_list.extend(files[1:])
                except Exception as e:
                    error_msg = f"排序文件时出错: {e}"
                    self.log_message(error_msg)
            
            # 添加重复文件夹（保留最新的一个）
            if hasattr(self, 'duplicate_folders'):
                for group_id, group_info in self.duplicate_folders.items():
                    try:
                        folders = group_info["folders"]
                        # 按修改时间排序，最新的文件夹排在前面
                        folders.sort(key=lambda x: os.path.getmtime(x), reverse=True)
                        # 保留第一个（最新的），其余添加到清理列表
                        clean_list.extend(folders[1:])
                    except Exception as e:
                        error_msg = f"排序文件夹时出错: {e}"
                        self.log_message(error_msg)
            
            return clean_list
    
    def save_scan_results(self, scan_type: str) -> dict:
        """Write a detailed scan report to ``scan_results/<type>_<ts>.json``.

        Serializes the duplicate-file groups, the duplicate-folder groups
        (when a folder scan has been run) and the current clean list, then
        returns the same dict that was written to disk.
        """
        from pathlib import Path
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        os.makedirs('scan_results', exist_ok=True)
        filename = f"scan_results/{scan_type}_{timestamp}.json"

        # Duplicate-file groups: one entry per hash, with normalized paths.
        groups = []
        for file_hash, files in self.duplicate_groups.items():
            normalized_files = [str(Path(entry)) for entry in files]
            try:
                groups.append({
                    "hash": file_hash,
                    "files": normalized_files,
                    "count": len(files),
                    "total_size": sum(os.path.getsize(f) for f in files if os.path.exists(f))
                })
            except Exception as e:
                error_msg = f"保存扫描结果时出错: {e}"
                self.log_message(error_msg)

        # Duplicate-folder groups, when present on this instance.
        folder_groups = []
        if hasattr(self, 'duplicate_folders'):
            for group_id, group_info in self.duplicate_folders.items():
                try:
                    normalized_folders = [str(Path(entry)) for entry in group_info["folders"]]
                    folder_groups.append({
                        "group_id": group_id,
                        "folders": normalized_folders,
                        "count": len(group_info["folders"]),
                        "total_size": group_info["total_size"],
                        "signature": group_info.get("signature", "")
                    })
                except Exception as e:
                    error_msg = f"保存文件夹扫描结果时出错: {e}"
                    self.log_message(error_msg)

        # Assemble the full report payload.
        results = {
            'timestamp': timestamp,
            'scan_type': scan_type,
            'system': self.config.system_type,
            'target_dirs': self.config.target_dirs,
            'groups': groups,
            'folder_groups': folder_groups,
            'clean_list': self.scan_results
        }

        # ensure_ascii=False keeps non-ASCII paths readable in the JSON file.
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)

        return results

class PackageCleaner(BaseCleaner):
    """软件包清理器，扫描并清理安装包和临时文件"""
    
    def __init__(self, config: CleanerConfig):
        """Initialize the package cleaner with the shared configuration.

        Fix: ``stop_event`` and ``large_file_threshold`` are already created
        by ``BaseCleaner.__init__``; the original re-assigned both here, which
        needlessly replaced the base class's event object.
        """
        super().__init__(config)


    def save_scan_results(self, scan_type: str) -> dict:
        """Build and return the scan-result dict without writing to disk."""
        return {
            'timestamp': datetime.now().strftime("%Y%m%d_%H%M%S"),
            'scan_type': scan_type,
            'system': self.config.system_type,
            'target_dirs': self.config.target_dirs,
            'files': self.scan_results  # raw list of per-file dicts
        }
        
    def scan(self, progress_callback=None) -> List[dict]:
        """Scan the target directories for package files.

        Walks every directory in ``self.config.target_dirs`` via
        ``_recursive_scan``, keeps files whose extension is in the configured
        package-extension set, and records path/size/mtime for each. Files
        larger than ``large_file_threshold`` are skipped with a warning.

        Fixes in this revision:
        - honor ``stop_event`` so ``stop()`` actually aborts the loop;
        - the pause branch no longer skips the current file (the original did
          ``i -= 1; continue``, which is a no-op on an ``enumerate`` loop
          variable and silently dropped the paused-on file);
        - removed the dead ``else: continue`` at the loop's end and fixed the
          over-indented except body.
        """
        package_ext = self.config.get_all_package_extensions()

        results = []

        # Gather candidate files from every target directory.
        allowed_files = []
        for dir_path in self.config.target_dirs:
            allowed_files.extend(self._recursive_scan(dir_path, progress_callback))

        for i, file_path in enumerate(allowed_files):
            # Abort promptly when a stop was requested.
            if self.stop_event.is_set():
                break

            # Block here while paused, then process this file normally.
            if self.pause_event.is_set():
                self._check_pause()

            # Memory guard: check every 100 files.
            if i % 100 == 0 and self.check_memory_threshold():
                self.log_message("内存保护机制激活，暂停后继续软件包扫描...")

            # Only files with a recognized package extension are recorded.
            if '.' not in file_path:
                continue
            file_ext = file_path.split('.')[-1].lower()
            if file_ext not in package_ext:
                continue

            try:
                if os.path.exists(file_path) and os.access(file_path, os.R_OK):
                    size = os.path.getsize(file_path)

                    # Skip oversized packages.
                    if size > self.large_file_threshold:
                        self.log_message(f"⚠️ 跳过大软件包: {file_path} ({size/1024/1024:.2f} MB)")
                        continue

                    mtime = os.path.getmtime(file_path)
                    results.append({
                        'path': file_path,
                        'size': size,
                        'mtime': self.validate_timestamp(mtime)
                    })
                    # Debug log (important for diagnosing missed packages).
                    self.log_message(f"扫描到软件包: {file_path} (扩展名: {file_ext})")
            except Exception as e:
                error_msg = f"获取文件信息失败: {file_path}, 错误: {e}"
                self.log_message(error_msg)

        self.scan_results = results
        return results
    
    def validate_timestamp(self, ts: float) -> float:
        """Return *ts* if it is a plausible timestamp, else the current time.

        A timestamp is accepted when it is numeric and lies between the Unix
        epoch and roughly ten years from now; anything else is logged and
        replaced with ``datetime.now().timestamp()``.
        """
        lower_bound = 0  # 1970-01-01
        upper_bound = datetime.now().timestamp() + 86400*365*10  # now + ~10 years

        try:
            value = float(ts)
        except (TypeError, ValueError):
            # Not numeric at all: fall back to the current time.
            self.log_message(f"非数值时间戳 {ts}，已重置为当前时间")
            return datetime.now().timestamp()

        if lower_bound <= value <= upper_bound:
            return value

        # Numeric but outside the plausible range.
        self.log_message(f"无效时间戳 {value}，已重置为当前时间")
        return datetime.now().timestamp()
    
    def stop(self):
        """Request cancellation: set ``stop_event`` for scan loops to poll."""
        self.stop_event.set()
    
    def _count_total_files(self) -> int:
        """计算所有目标目录中的文件总数"""
        count = 0
        for dir_path in self.config.target_dirs:
            try:
                for _, _, files in os.walk(dir_path):
                    count += len(files)
            except Exception as e:
                error_msg = f"无法遍历目录 {dir_path}: {e}"
                self.log_message(error_msg)
        return count