import concurrent.futures
import logging
import os
import time
from collections import deque
from datetime import datetime
from multiprocessing import cpu_count
from pathlib import Path
from threading import Lock
from typing import List, Callable, Optional, Iterator, Set, Dict, Tuple, Any

from .models import FileInfo, ScanStatistics
from utils.logger import setup_logger
from config.settings import EXCLUDE_DIRS, LARGE_FILE_THRESHOLD, SYSTEM_FILE_KEYWORDS, MAX_WORKERS, BATCH_SIZE, CALLBACK_INTERVAL, SCAN_TIMEOUT

class FileScanner:
    """高性能文件扫描器"""
    
    def __init__(self):
        """Initialize the scanner from values in the config module."""
        self.logger = setup_logger('scanner')
        self.statistics = ScanStatistics()
        # Membership checks run once per directory; a set keeps them O(1).
        self.exclude_dirs = set(EXCLUDE_DIRS)
        self.large_file_threshold = LARGE_FILE_THRESHOLD
        self.system_file_keywords = SYSTEM_FILE_KEYWORDS
        # Serializes statistics updates coming from worker threads.
        self.lock = Lock()
        self.last_callback_time = 0      # throttles progress callbacks
        self.root_path = None            # scan root, used for depth calculations
        self.is_scan_cancelled = False   # cooperative cancellation flag
        self.processed_dirs = 0          # running count of handled directories
        self.current_batch = []          # scratch buffer for the current batch
        # Overall scan budget in seconds.
        self.scan_timeout = SCAN_TIMEOUT
        # Pool sized for I/O-bound work: up to 4x cores, capped by MAX_WORKERS.
        self.max_workers = min(MAX_WORKERS, cpu_count() * 4)
        self.logger.info("FileScanner initialized with timeout: %d seconds, max workers: %d", 
                        self.scan_timeout, self.max_workers)
        
    def scan(self, root_path: str, exclude_dirs: Optional[List[str]] = None,
             progress_callback: Optional[Callable] = None, error_callback: Optional[Callable] = None) -> List[FileInfo]:
        """Scan *root_path* breadth-first with a thread pool.

        Args:
            root_path: Root directory to scan.
            exclude_dirs: Extra directory names to skip; merged with the
                configured EXCLUDE_DIRS when provided.
            progress_callback: Called periodically with the live ScanStatistics.
            error_callback: Called with a message on recoverable errors
                (timeouts, inaccessible directories).

        Returns:
            List of FileInfo objects for every file and directory found
            (possibly partial if the scan timed out or was cancelled).
        """
        # Each call starts fresh; a previous cancellation must not stick.
        self.is_scan_cancelled = False

        # Merge the configured exclusions with any caller-supplied ones.
        current_exclude_dirs = self.exclude_dirs.copy()
        if exclude_dirs:
            current_exclude_dirs.update(exclude_dirs)

        self.statistics.start_time = datetime.now()
        self.root_path = root_path  # used by the depth calculations
        self.last_callback_time = time.time()
        self.logger.info(f"开始扫描目录: {root_path}")
        self.logger.info(f"使用{self.max_workers}个工作线程")
        self.logger.info(f"扫描超时设置: {self.scan_timeout}秒")

        file_list: List[FileInfo] = []
        # BFS frontier.  deque gives O(1) popleft; list.pop(0) is O(n) and
        # dominated runtime on large trees.
        dir_queue = deque([root_path])
        processed_dirs = set()   # guards against revisiting directories
        chunks_to_process = []   # worker results awaiting the batched merge

        # Absolute deadline; disabled when the timeout is non-positive.
        timeout_time = time.time() + self.scan_timeout if self.scan_timeout > 0 else float('inf')

        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                while dir_queue or chunks_to_process:
                    # Global deadline check.
                    if time.time() > timeout_time:
                        self.is_scan_cancelled = True
                        self.logger.warning(f"扫描超时 ({self.scan_timeout}秒)，将返回已扫描的文件信息")
                        if error_callback:
                            error_callback(f"扫描超时 ({self.scan_timeout}秒)")
                        break

                    if self.is_scan_cancelled:
                        break

                    # Take up to BATCH_SIZE unvisited directories from the queue.
                    batch_dirs = []
                    while dir_queue and len(batch_dirs) < BATCH_SIZE:
                        current_dir = dir_queue.popleft()
                        if current_dir not in processed_dirs:
                            processed_dirs.add(current_dir)
                            batch_dirs.append(current_dir)

                    # Submit one task per directory; stop submitting on cancel.
                    futures = []
                    for directory in batch_dirs:
                        if self.is_scan_cancelled:
                            break
                        future = executor.submit(self._process_directory, directory, current_exclude_dirs)
                        futures.append((future, directory))

                    for future, directory in futures:
                        try:
                            # Per-directory result timeout (5 seconds).
                            results = future.result(timeout=5)
                            if results:
                                new_files, new_dirs = results
                                chunks_to_process.append((new_files, new_dirs))

                        except concurrent.futures.TimeoutError:
                            # One slow directory is skipped, not fatal.
                            self.logger.warning(f"处理目录超时: {directory}")
                            if error_callback:
                                error_callback(f"处理目录超时: {directory}")
                        except (PermissionError, OSError):
                            if error_callback:
                                error_callback(f"无法访问: {directory}")
                        except Exception as exc:
                            # Keep scanning, but leave a trace instead of
                            # silently swallowing unexpected failures.
                            self.logger.debug("Unexpected error for %s: %s", directory, exc)

                    # One locked merge per batch keeps lock contention low.
                    self._batch_update_results(chunks_to_process, file_list, dir_queue, progress_callback)
                    chunks_to_process = []

            # Push a final update so observers see the end state.
            if progress_callback:
                progress_callback(self.statistics)

        except Exception as e:
            self.logger.error(f"扫描过程发生错误: {e}")
            if error_callback:
                error_callback(str(e))

        self.statistics.end_time = datetime.now()
        self.statistics.scan_duration = (self.statistics.end_time - self.statistics.start_time).total_seconds()

        # Throughput metric for the summary log.
        files_per_second = 0
        if self.statistics.scan_duration > 0:
            files_per_second = self.statistics.total_files / self.statistics.scan_duration

        self.logger.info(f"扫描完成: 文件{self.statistics.total_files:,}个, "
                        f"目录{self.statistics.total_directories:,}个, "
                        f"总大小{FileInfo.format_size(self.statistics.total_size)}, "
                        f"扫描速度: {files_per_second:,.2f} 文件/秒")

        return file_list
        
    def _batch_update_results(self, chunks, file_list, dir_queue, progress_callback):
        """批量更新结果，减少锁争用和计算开销
        
        Args:
            chunks: 需要处理的块列表
            file_list: 文件信息列表
            dir_queue: 目录队列
            progress_callback: 进度回调函数
        """
        if not chunks:
            return
        
        # 性能优化：预计算大小
        start_time = time.time()
        
        # 预估大小创建列表
        total_items = sum(len(files) + len(dirs) for files, dirs in chunks)
        all_new_files = []
        all_new_dirs = []
        
        # 本地计算统计信息
        local_total_files = 0
        local_total_dirs = 0
        local_total_size = 0
        
        # 遍历所有块收集信息
        for new_files, new_dirs in chunks:
            all_new_files.extend(new_files)
            all_new_dirs.extend(new_dirs)
            
            # 快速统计
            dir_count = 0
            file_count = 0
            size_count = 0
            
            for file_info in new_files:
                if file_info.is_directory:
                    dir_count += 1
                else:
                    file_count += 1
                    size_count += file_info.size
            
            local_total_dirs += dir_count
            local_total_files += file_count
            local_total_size += size_count
        
        # 最小化锁范围
        with self.lock:
            # 更新文件列表和目录队列
            file_list.extend(all_new_files)
            dir_queue.extend(all_new_dirs)
            
            # 批量更新统计信息
            self.statistics.total_files += local_total_files
            self.statistics.total_directories += local_total_dirs
            self.statistics.total_size += local_total_size
            self.statistics.scanned_paths = self.statistics.total_files + self.statistics.total_directories
            
            # 更新已处理目录计数
            self.processed_dirs += len(all_new_dirs)
        
        # 记录性能数据
        update_time = time.time() - start_time
        self.logger.debug("Batch update processed %d files, %d dirs in %.4f seconds",
                        local_total_files, local_total_dirs, update_time)
        
        # 回调在锁外执行
        current_time = time.time()
        if progress_callback and (current_time - self.last_callback_time > CALLBACK_INTERVAL):
            progress_callback(self.statistics)
            self.last_callback_time = current_time
            
            # 每处理100个目录记录一次进度
            if self.processed_dirs % 100 == 0:
                self.logger.info("Scanned %d directories, found %d files, total size: %s",
                                self.processed_dirs, self.statistics.total_files,
                                FileInfo.format_size(self.statistics.total_size))
    
    def _process_directory(self, 
                         current_path: str,
                         exclude_dirs: Set[str]) -> Optional[Tuple[List[FileInfo], List[str]]]:
        """超高性能目录处理
        
        Args:
            current_path: 当前处理的目录路径
            exclude_dirs: 要排除的目录集合
        
        Returns:
            Tuple[文件信息列表, 子目录路径列表] 或 None
        """
        # 检查是否取消扫描
        if self.is_scan_cancelled:
            return None
            
        # 快速检查目录是否需要排除
        dir_name = os.path.basename(current_path)
        if dir_name in exclude_dirs:
            return None
            
        file_infos = []
        subdirs = []
        
        # 限制单个目录处理时间
        dir_start_time = time.time()
        
        try:
            # 快速检查目录是否存在
            if not os.path.exists(current_path) or not os.path.isdir(current_path):
                self.logger.debug("Directory does not exist or is invalid: %s", current_path)
                return None
                
            # 一次性获取所有条目
            entries = os.scandir(current_path)
            
            # 预分配空间减少动态内存分配
            entry_list = list(entries)  # 转换为列表以提前关闭scandir
            entries.close()  # 立即关闭资源
            
            # 快速过滤和处理
            for entry in entry_list:
                # 检查单个目录处理是否超时（5秒）
                if time.time() - dir_start_time > 5:
                    self.logger.warning("Directory processing timeout: %s, skipping remaining entries", current_path)
                    break
                    
                # 检查是否取消扫描
                if self.is_scan_cancelled:
                    return None
                    
                try:
                    # 使用os.DirEntry的属性直接判断，减少函数调用
                    if entry.is_dir(follow_symlinks=False):
                        if entry.name not in exclude_dirs:
                            # 为目录创建轻量级信息
                            dir_info = self._create_directory_info(entry)
                            file_infos.append(dir_info)
                            subdirs.append(entry.path)
                    elif entry.is_file(follow_symlinks=False):
                        # 为文件创建完整信息
                        file_info = self._create_file_info(entry)
                        if file_info:
                            file_infos.append(file_info)
                except (PermissionError, OSError):
                    # 性能优化：仅在调试级别记录权限错误
                    self.logger.debug("Permission denied: %s", entry.path)
                    continue
                except Exception as e:
                    # 记录其他异常，但不中断处理
                    self.logger.debug("Error processing %s: %s", entry.path, str(e))
                    continue
                    
        except (PermissionError, OSError):
            self.logger.debug("Cannot access directory: %s", current_path)
            return None
        except Exception as e:
            self.logger.error("Error processing directory %s: %s", current_path, str(e))
            return None
        
        # 记录目录处理性能数据
        process_time = time.time() - dir_start_time
        if process_time > 1.0:  # 只记录处理时间超过1秒的目录
            self.logger.info("Slow directory processing: %s took %.3f seconds, processed %d entries",
                            current_path, process_time, len(entry_list))
                            
        return file_infos, subdirs
    
    def _create_directory_info(self, entry) -> FileInfo:
        """Build a lightweight FileInfo for a directory entry (no stat call)."""
        file_path = entry.path

        # Depth relative to the scan root.  Leading separators are stripped
        # so the result does not change with a trailing slash on root_path;
        # this matches the calculation in _get_file_info.
        if self.root_path:
            try:
                rel_path = file_path[len(self.root_path):].lstrip(os.sep)
                depth = rel_path.count(os.sep) + 1
            except Exception:
                depth = 1
        else:
            depth = 1

        # Directories carry no size or timestamps here to keep scanning cheap.
        return FileInfo(
            path=file_path,
            name=entry.name,
            size=0,
            extension='',
            category='directory',
            is_directory=True,
            created_time=None,
            modified_time=None,
            access_time=None,
            parent_path=os.path.dirname(file_path),
            depth=depth,
            is_system_file=False,
            is_large_file=False
        )
    
    def _create_file_info(self, entry) -> Optional[FileInfo]:
        """Build a FileInfo for a regular-file entry.

        Returns:
            The populated FileInfo, or None when the entry cannot be
            stat'ed (the scan is deliberately best-effort).
        """
        try:
            name = entry.name
            file_path = entry.path

            # One stat call supplies size and all timestamps.
            stat_result = entry.stat(follow_symlinks=False)
            size = stat_result.st_size

            created_time = datetime.fromtimestamp(stat_result.st_ctime)
            modified_time = datetime.fromtimestamp(stat_result.st_mtime)

            is_large_file = size > self.large_file_threshold

            # rfind > 0 means dotfiles like ".bashrc" get no extension.
            dot_index = name.rfind('.')
            extension = name[dot_index:].lower() if dot_index > 0 else ''

            # Depth relative to the scan root.  Leading separators are
            # stripped so the result does not depend on a trailing slash in
            # root_path; this matches _get_file_info.
            if self.root_path:
                try:
                    rel_path = file_path[len(self.root_path):].lstrip(os.sep)
                    depth = rel_path.count(os.sep) + 1
                except Exception:
                    depth = 1
            else:
                depth = 1

            # A file counts as "system" when any configured keyword appears
            # anywhere in its lower-cased path.
            is_system_file = False
            if self.system_file_keywords:
                path_lower = file_path.lower()
                for keyword in self.system_file_keywords:
                    if keyword in path_lower:
                        is_system_file = True
                        break

            return FileInfo(
                path=file_path,
                name=name,
                size=size,
                extension=extension,
                category='',
                is_directory=False,
                created_time=created_time,
                modified_time=modified_time,
                access_time=datetime.fromtimestamp(stat_result.st_atime),
                parent_path=os.path.dirname(file_path),
                depth=depth,
                is_system_file=is_system_file,
                is_large_file=is_large_file
            )
        except Exception:
            # Deliberate best-effort: unreadable entries are skipped upstream.
            return None
    
    def _create_empty_file_info(self, entry) -> FileInfo:
        """Fallback FileInfo with placeholder metadata for an entry."""
        return FileInfo(
            path=entry.path,
            name=entry.name,
            size=0,
            extension='',
            category='unknown',
            is_directory=entry.is_dir(follow_symlinks=False),
            created_time=None,
            modified_time=None,
            access_time=None,
            parent_path=os.path.dirname(entry.path),
            depth=1,
            is_system_file=False,
            is_large_file=False
        )
    
    def _get_file_info(self, entry) -> FileInfo:
        """Build a fully-populated FileInfo from a directory entry.

        Args:
            entry: os.DirEntry for the file or directory.

        Returns:
            A FileInfo describing the entry.

        Raises:
            Whatever entry.stat() or the timestamp conversions raise —
            errors are intentionally propagated to the caller.
        """
        try:
            stat_result = entry.stat(follow_symlinks=False)
            is_directory = entry.is_dir(follow_symlinks=False)
            file_path = entry.path
            parent_path = os.path.dirname(file_path)

            # Depth is measured against the scan root when possible; the
            # leading separator is stripped so trailing slashes on the root
            # do not skew the count.
            if self.root_path and file_path.startswith(self.root_path):
                relative = file_path[len(self.root_path):].lstrip(os.sep)
                depth = relative.count(os.sep) + 1
            else:
                depth = file_path.count(os.sep)

            # Size, large-file flag and extension only apply to files.
            size = 0
            is_large_file = False
            extension = ''
            if not is_directory:
                size = stat_result.st_size
                is_large_file = size > self.large_file_threshold
                dot_index = entry.name.rfind('.')
                # A leading dot (hidden file) does not count as an extension.
                if dot_index > 0:
                    extension = entry.name[dot_index:].lower()

            # Flag entries whose path contains any configured system keyword.
            lowered = file_path.lower()
            is_system_file = any(kw in lowered for kw in self.system_file_keywords)

            return FileInfo(
                path=file_path,
                name=entry.name,
                size=size,
                extension=extension,
                category='',  # classification happens downstream
                is_directory=is_directory,
                created_time=datetime.fromtimestamp(stat_result.st_ctime),
                modified_time=datetime.fromtimestamp(stat_result.st_mtime),
                access_time=datetime.fromtimestamp(stat_result.st_atime),
                parent_path=parent_path,
                depth=depth,
                is_system_file=is_system_file,
                is_large_file=is_large_file
            )

        except Exception:
            # Propagate without logging to keep this hot path cheap.
            raise