#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
文件清理核心引擎
负责所有扫描和删除逻辑
"""

import os
import hashlib
import shutil
import subprocess
import sys
from pathlib import Path
from collections import defaultdict
from typing import Dict, List, Set, Tuple, Optional, Any


class FileCleanerCore:
    """文件清理核心引擎"""
    
    def __init__(self):
        self.temp_extensions = {
            '.tmp', '.temp', '.bak', '.old', '.cache', '.log',
            '.~tmp', '.backup', '.swp', '.swo', '.swn', '.orig',
            '.rej', '.chk', '.gid', '.dir', '.fts', '.ftg', '.aps',
            '.pdb', '.ilk', '.sbr', '.bsc', '.ncb', '.opt', '.plg',
            '.obj', '.pch', '.idb', '.res', '.tlb', '.tli', '.tlh',
            '.rsp', '.exp', '.lib', '.map', '.pdb', '.~df', '.$$$',
            '.dmp', '.mdmp', '.hdmp', '.stackdump'
        }
        
        self.temp_dirs = {
            'temp', 'tmp', 'cache', 'logs', '__pycache__',
            '.pytest_cache', '.coverage', '.tox', '.mypy_cache',
            'node_modules/.cache', '.next', '.nuxt', 'dist/cache',
            'build/temp', 'out/cache', 'target/debug',
            'Thumbs.db', '.DS_Store', '.localized'
        }
        
        self.dev_indicators = {
            # Java项目
            'pom.xml': 'Maven Java',
            'build.gradle': 'Gradle Java',
            'build.gradle.kts': 'Gradle Kotlin',
            'gradlew': 'Gradle Java',
            'gradlew.bat': 'Gradle Java',
            'settings.gradle': 'Gradle Java',
            'ivy.xml': 'Ivy Java',
            'build.xml': 'Ant Java',
            'project.clj': 'Leiningen Clojure',
            'deps.edn': 'Clojure CLI',
            
            # 前端项目
            'package.json': 'Node.js/前端',
            'package-lock.json': 'npm前端',
            'yarn.lock': 'Yarn前端',
            'pnpm-lock.yaml': 'pnpm前端',
            'webpack.config.js': 'Webpack前端',
            'webpack.config.ts': 'Webpack TypeScript',
            'vite.config.js': 'Vite前端',
            'vite.config.ts': 'Vite TypeScript',
            'rollup.config.js': 'Rollup前端',
            'gulpfile.js': 'Gulp前端',
            'gruntfile.js': 'Grunt前端',
            'angular.json': 'Angular',
            'nuxt.config.js': 'Nuxt.js',
            'next.config.js': 'Next.js',
            'svelte.config.js': 'Svelte',
            'vue.config.js': 'Vue.js',
            'quasar.config.js': 'Quasar Vue',
            'remix.config.js': 'Remix',
            'astro.config.mjs': 'Astro',
            'gatsby-config.js': 'Gatsby',
            'ember-cli-build.js': 'Ember.js',
            'ionic.config.json': 'Ionic',
            'capacitor.config.json': 'Capacitor',
            'cordova.config.xml': 'Cordova',
            'react-native.config.js': 'React Native',
            'metro.config.js': 'React Native Metro',
            'expo.json': 'Expo React Native',
            'app.json': 'Expo/React Native',
            'tsconfig.json': 'TypeScript',
            'jsconfig.json': 'JavaScript配置',
            'babel.config.js': 'Babel',
            '.babelrc': 'Babel',
            'postcss.config.js': 'PostCSS',
            'tailwind.config.js': 'Tailwind CSS',
            'playwright.config.js': 'Playwright测试',
            'cypress.config.js': 'Cypress测试',
            'jest.config.js': 'Jest测试',
            'vitest.config.js': 'Vitest测试',
            '.eslintrc.js': 'ESLint',
            '.eslintrc.json': 'ESLint',
            'prettier.config.js': 'Prettier',
            '.prettierrc': 'Prettier',
            'stylelint.config.js': 'Stylelint',
            
            # 其他项目类型
            'Cargo.toml': 'Rust',
            'requirements.txt': 'Python',
            'setup.py': 'Python',
            'pyproject.toml': 'Python Modern',
            'Pipfile': 'Python Pipenv',
            'poetry.lock': 'Python Poetry',
            'go.mod': 'Go',
            'go.sum': 'Go Modules',
            'Gemfile': 'Ruby',
            'Gemfile.lock': 'Ruby Bundler',
            'composer.json': 'PHP Composer',
            'composer.lock': 'PHP Composer',
            '.sln': 'Visual Studio',
            '.csproj': 'C#/.NET',
            '.fsproj': 'F#/.NET',
            '.vbproj': 'VB.NET',
            'project.json': '.NET Core',
            'Makefile': 'C/C++ Make',
            'CMakeLists.txt': 'CMake C/C++',
            'configure.ac': 'Autotools',
            'meson.build': 'Meson',
            'SConstruct': 'SCons',
            'Dockerfile': 'Docker',
            'docker-compose.yml': 'Docker Compose',
            'docker-compose.yaml': 'Docker Compose',
            'Vagrantfile': 'Vagrant',
            'terraform.tf': 'Terraform',
            'serverless.yml': 'Serverless',
            'cloudformation.yaml': 'AWS CloudFormation',
            'helm-chart.yaml': 'Helm Chart'
        }
        
        self.scan_strategies = {
            'always_skip': {
                'patterns': {
                    # Node.js/前端相关
                    'node_modules',
                    '.next',
                    '.nuxt', 
                    'dist',
                    'build',
                    'coverage',
                    '.nyc_output',
                    'storybook-static',
                    '.cache',
                    '.parcel-cache',
                    '.vite',
                    '.turbo',
                    
                    # Java相关
                    'target',
                    'build',
                    'out',
                    'bin',
                    'classes',
                    '.gradle',
                    '.m2',
                    '.ivy2',
                    'gradle',
                    
                    # 通用开发工具
                    '.git',
                    '.svn',
                    '.hg',
                    '.vs',
                    '.vscode',
                    '.idea',
                    '.eclipse',
                    '.metadata',
                    'workspace',
                    
                    # Python相关
                    'venv',
                    'env',
                    '__pycache__',
                    '.pytest_cache',
                    '.mypy_cache',
                    '.tox',
                    'htmlcov',
                    
                    # 其他
                    'obj',
                    'Temp',
                    'temp',
                    'cache',
                    'logs',
                    'log',
                    '.log',
                    'debug',
                    'Debug',
                    'release',
                    'Release',
                    'tmp',
                    '.tmp'
                }
            },
            
            'smart_skip': {
                'patterns': {
                    '*.exe', '*.dll', '*.so', '*.dylib',  
                    '*.zip', '*.tar', '*.gz', '*.7z', '*.rar',  
                    '*.mp4', '*.avi', '*.mkv', '*.mov',  
                    '*.mp3', '*.wav', '*.flac',  
                    '*.jpg', '*.png', '*.gif', '*.bmp', '*.svg'  
                }
            }
        }

    def scan_temp_files(self, root_path: str, progress_callback=None, status_callback=None) -> List[str]:
        """扫描临时文件"""
        temp_files = []
        total_checked = 0
        
        try:
            for root, dirs, files in self.smart_walk(root_path, max_depth=None, enable_smart_skip=False):
                dirs[:] = [d for d in dirs if not self._should_skip_directory(d)]
                
                for file in files:
                    if progress_callback and total_checked % 100 == 0:
                        progress_callback(min(90, (total_checked / 10000) * 100))
                    
                    total_checked += 1
                    file_path = os.path.join(root, file)
                    
                    if self._is_temp_file(file, file_path):
                        temp_files.append(file_path)
                        if status_callback:
                            status_callback(f"找到临时文件: {len(temp_files)} 个")
            
            if progress_callback:
                progress_callback(100)
                
        except Exception as e:
            if status_callback:
                status_callback(f"扫描出现错误: {str(e)}")
        
        return temp_files

    def smart_walk(self, root_path: str, max_depth: Optional[int] = None, enable_smart_skip: bool = True):
        """os.walk-style generator with optional depth limit and skip logic.

        Yields (current_path, dirs, files) tuples, top-down. Like os.walk,
        the yielded ``dirs`` list may be pruned in place by the caller
        before the generator resumes: the recursion below only descends
        into whatever remains in that list (callers here rely on this via
        ``dirs[:] = [...]``).

        Args:
            root_path: directory to start walking from.
            max_depth: maximum recursion depth; None means unlimited.
            enable_smart_skip: when True, directories in the always-skip
                set are dropped before they are even yielded.

        Unreadable directories are skipped silently rather than raising.
        """
        def _walk_recursive(current_path: str, current_depth: int = 0):
            # Stop descending once the depth budget is exhausted.
            if max_depth is not None and current_depth > max_depth:
                return
            
            try:
                items = os.listdir(current_path)
                dirs = []
                files = []
                
                for item in items:
                    item_path = os.path.join(current_path, item)
                    try:
                        if os.path.isdir(item_path):
                            if enable_smart_skip and self._should_always_skip(item):
                                continue
                            dirs.append(item)
                        else:
                            files.append(item)
                    except (OSError, PermissionError):
                        continue
                
                # Yield BEFORE recursing so the caller can prune `dirs` in place.
                yield current_path, dirs, files
                
                for dir_name in dirs:
                    dir_path = os.path.join(current_path, dir_name)
                    try:
                        yield from _walk_recursive(dir_path, current_depth + 1)
                    except (OSError, PermissionError, RecursionError):
                        continue
                        
            except (OSError, PermissionError):
                # Directory vanished or is not accessible: skip quietly.
                return
        
        yield from _walk_recursive(root_path)

    def find_duplicate_files(self, root_path: str, progress_callback=None, status_callback=None) -> Dict[str, List[str]]:
        """查找重复文件"""
        file_hashes = defaultdict(list)
        total_files = 0
        
        try:
            for root, dirs, files in self.smart_walk(root_path, max_depth=None, enable_smart_skip=False):
                dirs[:] = [d for d in dirs if not self._should_skip_directory(d)]
                
                for file in files:
                    if progress_callback and total_files % 50 == 0:
                        progress_callback(min(90, (total_files / 5000) * 100))
                    
                    total_files += 1
                    file_path = os.path.join(root, file)
                    
                    try:
                        if os.path.getsize(file_path) < 1024:  
                            continue
                            
                        file_hash = self._get_file_hash(file_path)
                        if file_hash:
                            file_hashes[file_hash].append(file_path)
                            
                        if status_callback:
                            duplicates_count = sum(1 for files in file_hashes.values() if len(files) > 1)
                            status_callback(f"已检查 {total_files} 个文件，发现 {duplicates_count} 组重复")
                            
                    except (OSError, PermissionError):
                        continue
            
            duplicates = {hash_val: files for hash_val, files in file_hashes.items() if len(files) > 1}
            
            if progress_callback:
                progress_callback(100)
                
        except Exception as e:
            if status_callback:
                status_callback(f"扫描出现错误: {str(e)}")
            duplicates = {}
        
        return duplicates

    def find_large_files(self, root_path: str, progress_callback=None, status_callback=None) -> List[Tuple[str, int]]:
        """查找大文件（超过50MB）"""
        large_files = []
        min_size = 50 * 1024 * 1024  
        total_checked = 0
        
        try:
            for root, dirs, files in self.smart_walk(root_path, max_depth=None, enable_smart_skip=False):
                dirs[:] = [d for d in dirs if not self._should_skip_directory(d)]
                
                for file in files:
                    if progress_callback and total_checked % 100 == 0:
                        progress_callback(min(90, (total_checked / 8000) * 100))
                    
                    total_checked += 1
                    file_path = os.path.join(root, file)
                    
                    try:
                        file_size = os.path.getsize(file_path)
                        if file_size >= min_size:
                            large_files.append((file_path, file_size))
                            if status_callback:
                                status_callback(f"找到大文件: {len(large_files)} 个")
                    except (OSError, PermissionError):
                        continue
            
            large_files.sort(key=lambda x: x[1], reverse=True)
            
            if progress_callback:
                progress_callback(100)
                
        except Exception as e:
            if status_callback:
                status_callback(f"扫描出现错误: {str(e)}")
        
        return large_files

    def scan_dev_directories(self, root_path: str, progress_callback=None, status_callback=None) -> List[Tuple[str, str, int, Dict[str, Any]]]:
        """Scan for developer project directories and analyze their contents.

        Returns a list of (path, project_type, size_bytes, content_analysis)
        tuples sorted by size, largest first. Size figures may be sampled
        estimates for very large trees (see _get_directory_size).
        """
        dev_dirs = []
        total_checked = 0
        
        try:
            if status_callback:
                status_callback("正在扫描开发目录...")
                
            for root, dirs, files in self.smart_walk(root_path, max_depth=None, enable_smart_skip=False):
                # Aggressive filtering, pruned in place so smart_walk honours it.
                dirs[:] = [d for d in dirs if not self._should_skip_directory(d)]
                
                total_checked += 1
                if progress_callback and total_checked % 20 == 0:  # frequent progress updates
                    progress_callback(min(85, (total_checked / 2000) * 100))
                
                dir_name = os.path.basename(root)
                
                # Fast-skip directories that are clearly not projects.
                if self._should_always_skip(dir_name):
                    continue
                
                project_type = self._identify_dev_directory(root, dir_name)
                
                if project_type:
                    try:
                        if status_callback:
                            status_callback(f"正在分析项目: {dir_name} ({project_type})")
                        
                        # Classify the project's contents (code/build/cache/...).
                        content_analysis = self._analyze_project_content(root, project_type)
                        
                        # Size calculation that includes all directories.
                        try:
                            root_files = os.listdir(root)
                            if 'node_modules' in root_files or len(root_files) > 2000:
                                # Large project: bigger sample, include build dirs.
                                dir_size = self._get_directory_size(root, max_files=3000, include_all=True)
                            else:
                                # Small/medium project: full scan.
                                dir_size = self._get_directory_size(root, max_files=8000, include_all=True)
                        except OSError:
                            dir_size = 0  # unreadable: report size as 0
                            
                        dev_dirs.append((root, project_type, dir_size, content_analysis))
                        if status_callback:
                            status_callback(f"找到开发目录: {len(dev_dirs)} 个 - 最新: {project_type}")
                    except (OSError, PermissionError):
                        continue
            
            dev_dirs.sort(key=lambda x: x[2], reverse=True)
            
            if progress_callback:
                progress_callback(100)
                
        except Exception as e:
            if status_callback:
                status_callback(f"扫描出现错误: {str(e)}")
        
        return dev_dirs

    def _identify_dev_directory(self, dir_path: str, dir_name: str) -> str:
        """Identify the project type of a directory.

        Avoids classifying common sub-directories (src, build, docs, ...)
        of a larger project as independent projects: ancestors are checked
        for indicator files, and suspiciously deep paths get an extra
        ancestor scan. Returns a project-type label, or "" when the
        directory should not be treated as a project root.
        """
        try:
            # Common project sub-directory names that must not be reported
            # as independent projects in their own right.
            common_subdirs = {
                'src', 'source', 'sources', 'lib', 'libs', 'include', 'includes',
                'main', 'app', 'components', 'pages', 'views', 'assets', 'public',
                'static', 'resources', 'test', 'tests', '__tests__', 'spec', 'specs',
                'docs', 'doc', 'documentation', 'examples', 'example', 'sample', 'samples',
                'build', 'dist', 'target', 'out', 'output', 'bin', 'obj', 'tmp', 'temp',
                'node_modules', '.git', '.svn', '.idea', '.vscode', '.gradle', '.settings',
                'config', 'configs', 'conf', 'settings', 'scripts', 'tools', 'utils', 'util',
                'web', 'www', 'client', 'server', 'frontend', 'backend', 'api', 'service',
                'modules', 'packages', 'plugins', 'extensions', 'widgets', 'data'
            }
            
            # For a common sub-directory name, run the stricter ancestor check.
            if dir_name.lower() in common_subdirs:
                # Inspect up to three levels of parent directories.
                current_path = dir_path
                for _ in range(3):
                    parent_dir = os.path.dirname(current_path)
                    if parent_dir == current_path or not parent_dir:
                        break
                    
                    # Does the parent carry project indicator files?
                    parent_project_type = self._check_project_indicators(parent_dir)
                    if parent_project_type:
                        # Indicator found above: current dir is a sub-directory,
                        # not an independent project.
                        return ""
                    
                    current_path = parent_dir
            
            # Check whether the directory itself is a project root.
            project_type = self._check_project_indicators(dir_path)
            if project_type:
                # Extra validation: make sure this is not a deep sub-directory.
                path_parts = dir_path.split(os.sep)
                
                # Deep paths are suspicious: they may be nested sub-directories.
                if len(path_parts) > 6:
                    # Look for a known project root among close ancestors.
                    for i in range(len(path_parts) - 1, max(0, len(path_parts) - 4), -1):
                        ancestor_path = os.sep.join(path_parts[:i])
                        if ancestor_path and os.path.exists(ancestor_path):
                            ancestor_project = self._check_project_indicators(ancestor_path)
                            if ancestor_project:
                                # Ancestor is a project root: treat this as a sub-dir.
                                return ""
                
                return self._get_project_type_with_context(project_type, dir_name, dir_path)
            
            return ""
            
        except (OSError, PermissionError):
            pass
        
        return ""
    
    def _check_project_indicators(self, dir_path: str) -> str:
        """检查目录中的项目指示文件"""
        try:
            files_in_dir = set(os.listdir(dir_path)[:100])  # 只检查前100个文件
        except OSError:
            return ""
        
        # 优先检查最常见的项目标识文件
        priority_indicators = ['package.json', 'pom.xml', 'build.gradle', 'Cargo.toml', 'go.mod', 'requirements.txt']
        
        # 先检查优先级高的指示文件
        for indicator in priority_indicators:
            if indicator in files_in_dir:
                return indicator
        
        # 再检查其他指示文件
        for indicator in self.dev_indicators:
            if indicator not in priority_indicators and indicator in files_in_dir:
                return indicator
        
        return ""
    
    def _get_project_type_with_context(self, indicator: str, dir_name: str, dir_path: str) -> str:
        """根据指示文件和上下文获取项目类型"""
        # 基本项目类型
        project_type = self.dev_indicators.get(indicator, 'Unknown')
        
        # 进一步细化项目类型
        if project_type == 'Node.js/前端':
            # 检查package.json内容来进一步确定前端框架
            try:
                package_json_path = os.path.join(dir_path, 'package.json')
                if os.path.exists(package_json_path):
                    with open(package_json_path, 'r', encoding='utf-8') as f:
                        import json
                        package_data = json.load(f)
                        dependencies = package_data.get('dependencies', {})
                        dev_dependencies = package_data.get('devDependencies', {})
                        all_deps = {**dependencies, **dev_dependencies}
                        
                        if 'react' in all_deps:
                            return 'React项目'
                        elif 'vue' in all_deps:
                            return 'Vue项目'
                        elif '@angular/core' in all_deps:
                            return 'Angular项目'
                        elif 'next' in all_deps:
                            return 'Next.js项目'
                        elif 'nuxt' in all_deps:
                            return 'Nuxt.js项目'
                        elif 'svelte' in all_deps:
                            return 'Svelte项目'
            except:
                pass
        
        # 对于一些特殊情况，可以根据目录名进一步细化
        if project_type == 'Maven Java' and 'spring' in dir_name.lower():
            return 'Spring Boot Maven'
        elif project_type == 'Gradle Java' and 'spring' in dir_name.lower():
            return 'Spring Boot Gradle'
        
        return project_type


    def _get_directory_size(self, dir_path: str, max_files: int = 5000, include_all: bool = False) -> int:
        """Return the directory size in bytes (possibly an estimate).

        Stats at most *max_files* files; if the budget is exhausted, or a
        running total of 20 GB is exceeded, the remainder is extrapolated
        from the averages seen so far, so results for very large trees are
        estimates, not exact sums.

        Args:
            dir_path: directory to measure.
            max_files: hard cap on the number of files actually stat'ed.
            include_all: when True, no directories are skipped; otherwise
                VCS/system/cache directories are excluded from the walk.
        """
        total_size = 0
        file_count = 0
        processed_dirs = 0
        
        try:
            for root, dirs, files in os.walk(dir_path):
                processed_dirs += 1
                
                # Only prune directories when include_all is off.
                if not include_all:
                    # Skip just true system and version-control directories.
                    dirs[:] = [d for d in dirs if not self._should_skip_for_size_calc(d)]
                
                # Accumulate sizes of all files in the current directory.
                for file in files:
                    if file_count >= max_files:
                        # Budget exhausted: extrapolate from what was seen.
                        if processed_dirs > 0 and file_count > 0:
                            avg_size_per_file = total_size / file_count
                            avg_files_per_dir = file_count / processed_dirs
                            # Assume as many directories again remain unvisited.
                            estimated_remaining_dirs = processed_dirs
                            estimated_remaining_size = estimated_remaining_dirs * avg_files_per_dir * avg_size_per_file
                            total_size += estimated_remaining_size
                        return int(total_size)
                    
                    try:
                        file_path = os.path.join(root, file)
                        file_size = os.path.getsize(file_path)
                        total_size += file_size
                        file_count += 1
                    except (OSError, PermissionError):
                        continue
                
                # Re-check the running total every 100 directories.
                if processed_dirs % 100 == 0 and total_size > 20 * 1024 * 1024 * 1024:  # 20GB
                    # Already huge: estimate the remainder and bail out early.
                    if processed_dirs > 0:
                        avg_size_per_dir = total_size / processed_dirs
                        # Conservatively assume half as many directories remain.
                        estimated_remaining = processed_dirs * 0.5 * avg_size_per_dir
                        total_size += estimated_remaining
                    return int(total_size)
                        
        except (OSError, PermissionError):
            pass
            
        return int(total_size)
    
    def _should_skip_for_size_calc(self, dir_name: str) -> bool:
        """在大小计算时应该跳过的目录（更保守的跳过策略）"""
        # 只跳过真正的系统目录，不跳过构建产物目录
        system_only_skip = {
            '.git', '.svn', '.hg',  # 版本控制
            'System Volume Information', '$Recycle.Bin',  # Windows系统
            '.Trash', '.Trashes',  # macOS系统
            'proc', 'sys', 'dev',  # Linux系统
            '__pycache__', '.pytest_cache', '.mypy_cache',  # Python缓存
            '.tox', '.coverage'  # Python工具缓存
        }
        return dir_name in system_only_skip

    def _get_file_hash(self, file_path: str, chunk_size: int = 8192) -> str:
        """计算文件哈希值"""
        try:
            hash_obj = hashlib.md5()
            with open(file_path, 'rb') as f:
                while chunk := f.read(chunk_size):
                    hash_obj.update(chunk)
            return hash_obj.hexdigest()
        except (OSError, PermissionError, IOError):
            return None

    def delete_files(self, file_paths: List[str], log_callback=None) -> Tuple[int, int]:
        """删除文件列表"""
        success_count = 0
        error_count = 0
        
        for file_path in file_paths:
            try:
                if os.path.isfile(file_path):
                    os.remove(file_path)
                    if log_callback:
                        log_callback(f"✅ 已删除文件: {file_path}", 'success')
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
                    if log_callback:
                        log_callback(f"✅ 已删除目录: {file_path}", 'success')
                success_count += 1
            except Exception as e:
                error_count += 1
                if log_callback:
                    log_callback(f"❌ 删除失败: {file_path} - {str(e)}", 'error')
        
        return success_count, error_count

    def build_file_tree(self, file_paths: List[str]) -> Dict[str, Any]:
        """构建文件树结构"""
        tree = {}
        
        for file_path in file_paths:
            parts = Path(file_path).parts
            current = tree
            
            for part in parts[:-1]:  
                if part not in current:
                    current[part] = {}
                current = current[part]
            
            filename = parts[-1]
            current[filename] = file_path
        
        return tree

    @staticmethod
    def open_file_location(file_path: str, log_callback=None) -> bool:
        """Open the file's location in the system file manager, selecting it
        when possible.

        On Windows, four strategies are tried in order: explorer (``/select``
        for files), ``cmd start``, ``os.startfile`` and PowerShell
        ``Invoke-Item``. On macOS ``open -R`` reveals the file; on other
        POSIX systems ``xdg-open`` opens the containing directory.

        Returns True as soon as one strategy succeeds, False otherwise.
        Fix: the final error message wrongly said "开发文件位置" ("develop
        file location") instead of "打开文件位置" ("open file location").
        """
        def log(msg, level='info'):
            # Forward to the caller's logger only when one was provided.
            if log_callback:
                log_callback(msg, level)
        
        try:
            # Normalize separators so explorer & friends accept the path.
            normalized_path = os.path.normpath(file_path)
            log(f"[DEBUG] 标准化后路径: {normalized_path}", 'info')
            
            if not os.path.exists(normalized_path):
                log(f"[ERROR] 路径不存在: {normalized_path}", 'error')
                return False
                
            if os.name == 'nt':  # Windows
                # Strategy 1: open directories directly; select files with /select.
                try:
                    if os.path.isdir(normalized_path):
                        log(f"[DEBUG] 方法1: 直接打开目录", 'info')
                        result = subprocess.run(['explorer', normalized_path], 
                                              capture_output=True, text=True, timeout=10)
                        if result.returncode == 0:
                            log(f"[DEBUG] 方法1成功", 'info')
                            return True
                        else:
                            log(f"[DEBUG] 方法1失败，返回码: {result.returncode}, 错误: {result.stderr}", 'info')
                    else:
                        log(f"[DEBUG] 方法1: 使用/select选中文件", 'info')
                        result = subprocess.run(['explorer', '/select,', normalized_path], 
                                              capture_output=True, text=True, timeout=10)
                        if result.returncode == 0:
                            log(f"[DEBUG] 方法1成功", 'info')
                            return True
                        else:
                            log(f"[DEBUG] 方法1失败，返回码: {result.returncode}, 错误: {result.stderr}", 'info')
                except Exception as e1:
                    log(f"[DEBUG] 方法1异常: {str(e1)}", 'info')
                
                # Strategy 2: the cmd `start` builtin.
                try:
                    log(f"[DEBUG] 方法2: 使用start命令", 'info')
                    result = subprocess.run(['cmd', '/c', 'start', '', normalized_path], 
                                          capture_output=True, text=True, timeout=10)
                    if result.returncode == 0:
                        log(f"[DEBUG] 方法2成功", 'info')
                        return True
                    else:
                        log(f"[DEBUG] 方法2失败，返回码: {result.returncode}, 错误: {result.stderr}", 'info')
                except Exception as e2:
                    log(f"[DEBUG] 方法2异常: {str(e2)}", 'info')
                
                # Strategy 3: os.startfile (Windows-only API).
                try:
                    log(f"[DEBUG] 方法3: 使用os.startfile", 'info')
                    os.startfile(normalized_path)
                    log(f"[DEBUG] 方法3成功", 'info')
                    return True
                except Exception as e3:
                    log(f"[DEBUG] 方法3异常: {str(e3)}", 'info')
                
                # Strategy 4: PowerShell Invoke-Item.
                try:
                    log(f"[DEBUG] 方法4: 使用PowerShell", 'info')
                    ps_cmd = f'Invoke-Item "{normalized_path}"'
                    result = subprocess.run(['powershell', '-Command', ps_cmd], 
                                          capture_output=True, text=True, timeout=10)
                    if result.returncode == 0:
                        log(f"[DEBUG] 方法4成功", 'info')
                        return True
                    else:
                        log(f"[DEBUG] 方法4失败，返回码: {result.returncode}, 错误: {result.stderr}", 'info')
                except Exception as e4:
                    log(f"[DEBUG] 方法4异常: {str(e4)}", 'info')
                    
            elif os.name == 'posix':  # Linux/Mac
                if sys.platform == 'darwin':  # macOS: reveal the file in Finder
                    subprocess.run(['open', '-R', normalized_path], check=True)
                else:  # Linux: open the containing directory
                    subprocess.run(['xdg-open', os.path.dirname(normalized_path)], check=True)
                return True
            
            log(f"[ERROR] 所有方法都失败了", 'error')
            return False
            
        except Exception as e:
            log(f"[ERROR] 打开文件位置时发生未知异常: {str(e)}", 'error')
            return False

    def _is_temp_file(self, filename: str, file_path: str) -> bool:
        """判断是否为临时文件"""
        _, ext = os.path.splitext(filename.lower())
        
        if ext in self.temp_extensions:
            return True
        
        if any(temp_pattern in filename.lower() for temp_pattern in self.temp_dirs):
            return True
        
        if filename.startswith('~') or filename.startswith('.#'):
            return True
        
        if filename.endswith('~') or filename.endswith('.tmp'):
            return True
        
        return False

    def _should_skip_directory(self, dir_name: str) -> bool:
        """判断是否应该跳过目录"""
        if self._should_always_skip(dir_name):
            return True
        
        # 移除深度扫描限制，但保留一些基本的系统目录跳过
        system_skip_patterns = {
            'System Volume Information', '$Recycle.Bin', 
            'pagefile.sys', 'hiberfil.sys', 'swapfile.sys'
        }
        if dir_name in system_skip_patterns:
            return True
        
        return False

    def _should_always_skip(self, dir_name: str) -> bool:
        """总是应该跳过的目录"""
        return dir_name in self.scan_strategies['always_skip']['patterns']
    
    def _analyze_project_content(self, project_path: str, project_type: str) -> Dict[str, Any]:
        """Classify the top-level contents of a project directory.

        Loose files directly under *project_path* are totalled into the
        ``root_files`` bucket; every immediate sub-directory is assigned to
        exactly one category based on its name and the project's language
        family (derived from *project_type*).

        Args:
            project_path: Path of the project root to analyze.
            project_type: Project type label (e.g. "Maven Java"); used to
                select language-specific classification rules.

        Returns:
            Mapping of category name ('code', 'build', 'cache', 'logs',
            'docs', 'config', 'deps', 'other', 'root_files') to a dict
            ``{'folders': list, 'size': int, 'count': int}``; each entry in
            ``folders`` is ``{'name', 'path', 'size', 'count'}``.  Unreadable
            locations are skipped or recorded with size/count 0.
        """
        categories = {
            'code': {'folders': [], 'size': 0, 'count': 0},
            'build': {'folders': [], 'size': 0, 'count': 0},
            'cache': {'folders': [], 'size': 0, 'count': 0},
            'logs': {'folders': [], 'size': 0, 'count': 0},
            'docs': {'folders': [], 'size': 0, 'count': 0},
            'config': {'folders': [], 'size': 0, 'count': 0},
            'deps': {'folders': [], 'size': 0, 'count': 0},
            'other': {'folders': [], 'size': 0, 'count': 0},
            'root_files': {'folders': [], 'size': 0, 'count': 0}  # bucket for loose files in the project root
        }
        
        # Directory-name classification rules, keyed by category and then by
        # language family; 'general' patterns apply to every project type.
        category_rules = {
            'code': {
                'java': ['src', 'main', 'java', 'kotlin', 'groovy', 'scala'],
                'frontend': ['src', 'app', 'components', 'pages', 'views', 'assets', 'public', 'static'],
                'python': ['src', 'lib', 'modules', 'packages'],
                'general': ['source', 'sources', 'code']
            },
            'build': {
                'java': ['target', 'build', 'out', 'bin', 'classes', 'output'],
                'frontend': ['dist', 'build', 'out', '.next', '.nuxt', 'public/build'],
                'python': ['build', 'dist', '__pycache__', '*.egg-info'],
                'general': ['build', 'output', 'compiled']
            },
            'cache': {
                'java': ['.gradle', '.m2/repository', '.ivy2'],
                'frontend': ['node_modules', '.cache', '.parcel-cache', '.vite', '.turbo'],
                'python': ['.pytest_cache', '.mypy_cache', '.tox', '__pycache__'],
                'general': ['cache', 'tmp', 'temp', '.cache']
            },
            'logs': {
                'general': ['logs', 'log', 'debug', 'error', 'access', 'application.log']
            },
            'docs': {
                'general': ['docs', 'doc', 'documentation', 'readme', 'wiki', 'help']
            },
            'config': {
                'general': ['config', 'conf', 'settings', 'properties', '.env', '.config']
            },
            'deps': {
                'java': ['lib', 'libs', 'dependencies', 'jars'],
                'frontend': ['vendor', 'packages'],
                'python': ['site-packages', 'lib/python'],
                'general': ['vendor', 'third-party', 'external']
            }
        }
        
        try:
            # Language family ('java'/'frontend'/'python'/'general') chosen
            # from the human-readable project type label.
            lang_category = self._get_language_category(project_type)
            
            # First total the loose files sitting directly in the root.
            root_files_size = 0
            root_files_count = 0
            
            for item in os.listdir(project_path):
                item_path = os.path.join(project_path, item)
                
                if os.path.isfile(item_path):
                    # Accumulate root-level file sizes and counts.
                    try:
                        file_size = os.path.getsize(item_path)
                        root_files_size += file_size
                        root_files_count += 1
                    except (OSError, PermissionError):
                        continue  # unreadable file: skip it silently
                elif os.path.isdir(item_path):
                    # Assign the sub-directory to a category by its name.
                    category = self._categorize_directory(item, lang_category, category_rules)
                    
                    # Measure the directory; both helpers cap their work at
                    # max_files entries, so 'size'/'count' may be estimates.
                    try:
                        dir_size = self._get_directory_size(item_path, max_files=2000, include_all=True)
                        file_count = self._count_files_in_directory(item_path, max_files=2000)
                        
                        categories[category]['folders'].append({
                            'name': item,
                            'path': item_path,
                            'size': dir_size,
                            'count': file_count
                        })
                        categories[category]['size'] += dir_size
                        categories[category]['count'] += file_count
                        
                    except (OSError, PermissionError):
                        # Inaccessible directory: still record it, with size 0.
                        categories[category]['folders'].append({
                            'name': item,
                            'path': item_path,
                            'size': 0,
                            'count': 0
                        })
            
            # Append the aggregate root-file statistics as one pseudo-folder.
            if root_files_count > 0:
                categories['root_files']['folders'].append({
                    'name': '根目录文件',
                    'path': project_path,
                    'size': root_files_size,
                    'count': root_files_count
                })
                categories['root_files']['size'] = root_files_size
                categories['root_files']['count'] = root_files_count
                    
        except (OSError, PermissionError):
            pass  # project root itself unreadable: return the empty categories
            
        return categories
    
    def _get_language_category(self, project_type: str) -> str:
        """根据项目类型获取语言类别"""
        if any(x in project_type.lower() for x in ['java', 'maven', 'gradle', 'spring', 'kotlin']):
            return 'java'
        elif any(x in project_type.lower() for x in ['node', 'react', 'vue', 'angular', 'typescript', 'javascript', 'frontend', '前端']):
            return 'frontend'
        elif any(x in project_type.lower() for x in ['python', 'django', 'flask']):
            return 'python'
        else:
            return 'general'
    
    def _categorize_directory(self, dir_name: str, lang_category: str, category_rules: Dict) -> str:
        """将目录分类到合适的类别"""
        dir_lower = dir_name.lower()
        
        # 检查每个类别的规则
        for category, rules in category_rules.items():
            # 先检查特定语言的规则
            if lang_category in rules:
                for pattern in rules[lang_category]:
                    if pattern.lower() in dir_lower or dir_lower == pattern.lower():
                        return category
            
            # 再检查通用规则
            if 'general' in rules:
                for pattern in rules['general']:
                    if pattern.lower() in dir_lower or dir_lower == pattern.lower():
                        return category
        
        return 'other'
    
    def _count_files_in_directory(self, dir_path: str, max_files: int = 2000) -> int:
        """统计目录中的文件数量 - 改进版本"""
        file_count = 0
        processed_dirs = 0
        
        try:
            for root, dirs, files in os.walk(dir_path):
                processed_dirs += 1
                # 只跳过真正的系统目录
                dirs[:] = [d for d in dirs if not self._should_skip_for_size_calc(d)]
                
                file_count += len(files)
                if file_count >= max_files:
                    # 基于已处理目录的估算
                    if processed_dirs > 0:
                        avg_files_per_dir = file_count / processed_dirs
                        estimated_remaining_dirs = processed_dirs  # 估算还有相同数量的目录
                        estimated_remaining_files = estimated_remaining_dirs * avg_files_per_dir
                        file_count += int(estimated_remaining_files)
                    return file_count
                    
        except (OSError, PermissionError):
            pass
            
        return file_count