import os
import queue
import threading
import time
from typing import List, Dict, Any, Optional, Callable
import logging

# Configure logging: stream to the console and to a file under logs/.
# Create the log directory up front — logging.FileHandler raises
# FileNotFoundError if 'logs/' does not exist (e.g. on a fresh checkout).
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/pipeline_manager.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger('pipeline_manager')

from scanner.parallel_scanner import ParallelScanner
from hash_calculator.parallel_hash_calculator import ParallelHashCalculator
from duplicate_detector.duplicate_detector import DuplicateDetector


class PipelineManager:
    """Three-stage producer/consumer pipeline: scan -> hash -> detect.

    Each stage runs in its own worker thread and hands work to the next
    stage through a bounded queue; a ``None`` sentinel on each queue marks
    end-of-stream. ``start()`` blocks until all three stages finish, then
    runs duplicate detection on the collected results.
    """

    def __init__(self,
                 scanner: ParallelScanner,
                 hash_calculator: ParallelHashCalculator,
                 detector: DuplicateDetector,
                 batch_size: int = 100):
        """
        Initialize the pipeline manager.
        :param scanner: parallel scanner instance
        :param hash_calculator: parallel hash calculator instance
        :param detector: duplicate detector instance
        :param batch_size: number of files handed to the hasher per batch
        """
        self.scanner = scanner
        self.hash_calculator = hash_calculator
        self.detector = detector
        self.batch_size = batch_size
        # Bounded queues apply back-pressure so a fast scanner cannot run
        # arbitrarily far ahead of the slower hashing stage.
        self._scanned_files_queue = queue.Queue(maxsize=batch_size * 2)
        self._hashed_files_queue = queue.Queue(maxsize=batch_size * 2)
        self._is_running = False
        self._threads = []
        # Guards _is_running and _error_count, which are shared across threads.
        self._lock = threading.Lock()
        self._error_count = 0

    def _scan_worker(self, folders: List[str], recursive: bool, ignore_patterns: Optional[List[str]]):
        """Scan stage: run the scanner and feed file-info dicts downstream.

        Always emits a ``None`` sentinel (even on error) so the hashing
        stage can terminate.
        """
        try:
            logger.info('扫描工作线程已启动')
            file_info_list = self.scanner.start_scan(
                folders,
                recursive=recursive,
                ignore_patterns=ignore_patterns
            )
            logger.info(f'扫描完成，共找到 {len(file_info_list)} 个文件')

            # Feed every scanned file into the queue; stop early if the
            # scan was cancelled (reads the scanner's private flag —
            # NOTE(review): a public accessor on the scanner would be cleaner).
            for file_info in file_info_list:
                if self.scanner._cancelled:
                    break
                self._scanned_files_queue.put(file_info)

            # End-of-stream sentinel for the hashing stage.
            self._scanned_files_queue.put(None)
            logger.info('扫描工作线程已完成')
        except Exception as e:
            logger.error(f'扫描工作线程出错: {str(e)}')
            with self._lock:
                self._error_count += 1
            # Still emit the sentinel so downstream stages can shut down.
            self._scanned_files_queue.put(None)

    def _hash_worker(self):
        """Hash stage: batch scanned files, compute hashes, forward results.

        BUGFIX: the original implementation lost the ``None`` end-of-stream
        sentinel when it was consumed inside the inner (non-blocking)
        batching loop — that only broke the inner loop, so the outer loop
        then spun on get(timeout)/Empty/continue forever while
        ``_is_running`` was still True, deadlocking ``start()``. The
        ``stream_ended`` flag now records the sentinel and terminates the
        outer loop after the final batch has been flushed downstream.
        """
        try:
            logger.info('哈希计算工作线程已启动')
            stream_ended = False
            while not stream_ended:
                batch = []
                try:
                    # Block briefly for the first item of the batch.
                    item = self._scanned_files_queue.get(timeout=1.0)
                    if item is None:  # end-of-stream sentinel
                        self._scanned_files_queue.task_done()
                        break
                    batch.append(item)

                    # Greedily fill the rest of the batch without blocking.
                    while len(batch) < self.batch_size:
                        try:
                            item = self._scanned_files_queue.get_nowait()
                            if item is None:  # sentinel arrived mid-batch
                                self._scanned_files_queue.task_done()
                                # Remember it so the outer loop exits after
                                # this batch is processed (see docstring).
                                stream_ended = True
                                break
                            batch.append(item)
                        except queue.Empty:
                            break
                except queue.Empty:
                    # No work right now; exit only once the pipeline has been
                    # stopped and the queue fully drained.
                    if not self._is_running and self._scanned_files_queue.empty():
                        break
                    continue

                if batch:
                    # Zero-byte files are skipped: they get no content hash.
                    file_paths = [item['path'] for item in batch if item['size'] > 0]
                    hash_results = self.hash_calculator.compute_file_hashes(file_paths)

                    # Attach hashes and forward every item (hashed or not)
                    # to the detection stage.
                    for item in batch:
                        if item['size'] > 0 and item['path'] in hash_results:
                            item['hash'] = hash_results[item['path']]
                        self._hashed_files_queue.put(item)
                        self._scanned_files_queue.task_done()

            # End-of-stream sentinel for the detection stage.
            self._hashed_files_queue.put(None)
            logger.info('哈希计算工作线程已完成')
        except Exception as e:
            logger.error(f'哈希计算工作线程出错: {str(e)}')
            with self._lock:
                self._error_count += 1
            # Still emit the sentinel so the detection stage can terminate.
            self._hashed_files_queue.put(None)

    def _detect_worker(self, progress_callback: Optional[Callable[[int, int], None]]):
        """Detect stage: stream hashed file-info dicts into the detector.

        The total file count is unknown while results are still streaming
        in, so ``progress_callback`` receives ``total=0``; callers should
        treat 0 as "total unknown".
        """
        try:
            logger.info('重复检测工作线程已启动')
            total_files = 0  # unknown until the scan completes; reported as 0
            processed_files = 0

            while True:
                item = self._hashed_files_queue.get()
                if item is None:  # end-of-stream sentinel
                    self._hashed_files_queue.task_done()
                    break

                self.detector.add_file(item)
                processed_files += 1

                if progress_callback:
                    progress_callback(processed_files, total_files)

                self._hashed_files_queue.task_done()

            logger.info('所有文件已添加到检测器')
        except Exception as e:
            logger.error(f'重复检测工作线程出错: {str(e)}')
            with self._lock:
                self._error_count += 1

    def start(self, folders: List[str], recursive: bool = True, ignore_patterns: Optional[List[str]] = None, progress_callback: Optional[Callable[[int, int], None]] = None):
        """
        Run the pipeline to completion (blocks the calling thread).
        :param folders: list of folders to scan
        :param recursive: whether to scan recursively
        :param ignore_patterns: list of file patterns to ignore
        :param progress_callback: called as (processed, total) per file;
                                  total is 0 (unknown) during streaming
        :return: list of duplicate-file groups ([] if already running)
        """
        with self._lock:
            if self._is_running:
                logger.warning('流水线已在运行中')
                return []
            self._is_running = True
            self._error_count = 0

        # Drop results from any previous run.
        self.detector.clear()

        self._threads = []

        # Stage 1: scanning.
        scan_thread = threading.Thread(
            target=self._scan_worker,
            args=(folders, recursive, ignore_patterns)
        )
        self._threads.append(scan_thread)
        scan_thread.start()

        # Stage 2: hashing.
        hash_thread = threading.Thread(target=self._hash_worker)
        self._threads.append(hash_thread)
        hash_thread.start()

        # Stage 3: duplicate detection.
        detect_thread = threading.Thread(
            target=self._detect_worker,
            args=(progress_callback,)
        )
        self._threads.append(detect_thread)
        detect_thread.start()

        # Wait for every stage to drain and terminate.
        for thread in self._threads:
            thread.join()

        with self._lock:
            self._is_running = False
            if self._error_count > 0:
                logger.error(f'流水线处理过程中出现 {self._error_count} 个错误')

        # All files collected — run the actual duplicate detection.
        logger.info('开始查找重复文件')
        duplicate_groups = self.detector.find_duplicates()
        logger.info(f'重复文件查找完成，找到 {len(duplicate_groups)} 组重复文件')

        return duplicate_groups

    def stop(self):
        """Request a shutdown of a running pipeline (call from another
        thread than the one blocked in ``start()``)."""
        with self._lock:
            if not self._is_running:
                logger.warning('流水线未运行')
                return
            self._is_running = False

        # Cancel the in-flight scan and hash work.
        self.scanner.cancel_scan()
        self.hash_calculator.cancel_all()

        # Drain both queues so blocked producers can make progress.
        while not self._scanned_files_queue.empty():
            try:
                self._scanned_files_queue.get_nowait()
                self._scanned_files_queue.task_done()
            except queue.Empty:
                break

        while not self._hashed_files_queue.empty():
            try:
                self._hashed_files_queue.get_nowait()
                self._hashed_files_queue.task_done()
            except queue.Empty:
                break

        # Give the workers a bounded grace period to exit.
        for thread in self._threads:
            if thread.is_alive():
                thread.join(timeout=3.0)

        logger.info('流水线已停止')

    @property
    def is_running(self):
        """Whether the pipeline is currently running (thread-safe)."""
        with self._lock:
            return self._is_running

    @property
    def error_count(self):
        """Number of worker errors recorded so far (thread-safe)."""
        with self._lock:
            return self._error_count


if __name__ == '__main__':
    # Smoke-test the pipeline manager against the current directory.
    def report_progress(done, total):
        print(f'进度: {done}/{total} 文件已处理')

    # Assemble the three pipeline stages.
    file_scanner = ParallelScanner(max_workers=4)
    hasher = ParallelHashCalculator(max_workers=4)
    dup_detector = DuplicateDetector()

    manager = PipelineManager(file_scanner, hasher, dup_detector, batch_size=50)

    target_folders = ['.']  # scan the current directory
    skip_patterns = ['.git/', '.trae/', '*.log', 'tmp/']
    print('开始扫描和检测重复文件...')
    started = time.time()
    groups = manager.start(target_folders, recursive=True, ignore_patterns=skip_patterns, progress_callback=report_progress)
    elapsed = time.time() - started

    # Summarize the findings.
    print(f'扫描和检测完成，耗时: {elapsed:.4f} 秒')
    print(f'找到 {len(groups)} 组重复文件')
    for index, dup_group in enumerate(groups, start=1):
        print(f'组 {index}: {len(dup_group)} 个文件')
        for entry in dup_group[:2]:  # show at most the first two files
            print(f'  - {entry["path"]}')
        if len(dup_group) > 2:
            print(f'  ... 等 {len(dup_group) - 2} 个文件')

    print('流水线管理器测试完成')

# Performance notes:
# 1. Producer-consumer pattern pipelines scanning, hashing and duplicate detection
# 2. Queues carry data between threads and decouple the processing stages
# 3. Batching reduces thread-switch and per-item queue overhead
# 4. Parallel scanning and parallel hashing exploit multiple CPU cores
# 5. Graceful stop and cancellation mechanism
# 6. Error handling with an error counter
# 7. A lock keeps shared state thread-safe
# 8. Queue and batch sizes balance memory use against throughput
# 9. Progress callback provides user feedback
# 10. Supports both recursive and non-recursive scan modes