import hashlib
import os
import json
import threading
import concurrent.futures
from typing import Optional, Callable, Dict, Any, List
import logging
import pathlib

# Logging setup: write to a file under logs/ and echo to the console.
# Create the logs/ directory first -- logging.FileHandler raises
# FileNotFoundError if the parent directory does not exist.
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/parallel_hash_calculator.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger('parallel_hash_calculator')

from hash_calculator.hash_calculator import HashCalculator


class ParallelHashCalculator(HashCalculator):
    """Hash calculator that processes multiple files concurrently.

    Uses a thread pool to hash files in parallel while guarding the shared
    hash cache with a lock.  Relies on state inherited from
    ``HashCalculator``: ``algorithm``, ``progress_callback``,
    ``_hash_cache``, ``_hash_cache_file``, ``_cancelled`` and
    ``_is_calculating`` (inferred from usage below -- confirm against the
    parent class).
    """

    # Maximum number of cache entries kept before FIFO eviction kicks in.
    _CACHE_LIMIT = 10000

    def __init__(self,
                 algorithm: str = 'md5',
                 progress_callback: Optional[Callable[[str, int, int], None]] = None,
                 max_workers: Optional[int] = None):
        """
        Initialize the parallel hash calculator.

        :param algorithm: Hash algorithm name accepted by ``hashlib.new``
            (e.g. 'md5', 'sha1').
        :param progress_callback: Per-file progress callback receiving
            (file_path, processed_bytes, total_bytes).
        :param max_workers: Maximum worker threads; defaults to the CPU count.
        """
        super().__init__(algorithm=algorithm, progress_callback=progress_callback)
        # os.cpu_count() may return None on exotic platforms; fall back to 1
        # so min(self.max_workers, ...) in compute_file_hashes never sees None.
        self.max_workers = max_workers or os.cpu_count() or 1
        self._lock = threading.Lock()        # protects the shared hash cache
        self._active_tasks = 0               # number of in-flight hash tasks
        self._task_lock = threading.Lock()   # protects _active_tasks

    def _compute_file_hash(self, file_path: str) -> Optional[str]:
        """
        Compute the hash of a single file (thread-safe implementation).

        :param file_path: Path of the file to hash.
        :return: Hex digest string, or None on failure or cancellation.
        """
        # Stat up front: this doubles as the existence check and avoids the
        # TOCTOU race of a separate exists()/stat() pair (the file could be
        # removed between the two calls).
        try:
            file_stats = os.stat(file_path)
        except OSError:
            logger.error(f'文件不存在: {file_path}')
            return None

        # The cache key includes size and mtime so a modified file is re-hashed.
        file_key = f'{file_path}_{file_stats.st_size}_{file_stats.st_mtime}'
        with self._lock:
            if file_key in self._hash_cache:
                logger.debug(f'从缓存获取哈希值: {file_path}')
                return self._hash_cache[file_key]

        try:
            hash_obj = hashlib.new(self.algorithm)
            file_size = file_stats.st_size  # reuse the stat result above
            processed_bytes = 0
            chunk_size = 8192  # read in 8 KB chunks to bound memory use

            with open(file_path, 'rb') as f:
                while True:
                    # Poll the shared cancellation flag between chunks so a
                    # cancel request aborts large files promptly.
                    if self._cancelled:
                        logger.info(f'哈希计算已取消: {file_path}')
                        return None

                    chunk = f.read(chunk_size)
                    if not chunk:
                        break
                    hash_obj.update(chunk)
                    processed_bytes += len(chunk)

                    if self.progress_callback:
                        self.progress_callback(file_path, processed_bytes, file_size)

            hash_value = hash_obj.hexdigest()

            # Store the result; evict the oldest entry once the cache exceeds
            # its size limit (simple FIFO -- dicts preserve insertion order).
            with self._lock:
                self._hash_cache[file_key] = hash_value
                if len(self._hash_cache) > self._CACHE_LIMIT:
                    oldest_key = next(iter(self._hash_cache))
                    del self._hash_cache[oldest_key]

            logger.info(f'计算哈希值完成: {file_path}, {self.algorithm}: {hash_value}')
            return hash_value

        except PermissionError:
            logger.warning(f'无权限访问文件: {file_path}')
        except Exception as e:
            logger.error(f'计算哈希值时出错: {file_path}, 错误: {str(e)}')
        return None

    def compute_file_hash(self, file_path: str) -> Optional[str]:
        """
        Compute the hash of a single file (public, thread-safe entry point).

        Tracks the active-task count around the actual computation.

        :param file_path: Path of the file to hash.
        :return: Hex digest string, or None on failure or cancellation.
        """
        with self._task_lock:
            self._active_tasks += 1

        try:
            return self._compute_file_hash(file_path)
        finally:
            with self._task_lock:
                self._active_tasks -= 1

    def compute_file_hashes(self, file_paths: List[str], progress_callback: Optional[Callable[[int, int], None]] = None) -> Dict[str, Optional[str]]:
        """
        Compute hashes for multiple files in parallel.

        :param file_paths: List of file paths to hash.
        :param progress_callback: Batch progress callback receiving
            (completed_count, total_count).
        :return: Mapping from file path to hex digest, or None where the
            computation failed.  Files whose tasks were cancelled before
            completion are absent from the mapping.
        """
        if not file_paths:
            return {}

        self._is_calculating = True
        self._cancelled = False
        results: Dict[str, Optional[str]] = {}
        completed_tasks = 0
        total_tasks = len(file_paths)

        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=min(self.max_workers, total_tasks)) as executor:
                # Submit one task per file.
                future_to_file = {executor.submit(self._compute_file_hash, file_path): file_path for file_path in file_paths}

                # Collect results as tasks finish.
                for future in concurrent.futures.as_completed(future_to_file):
                    file_path = future_to_file[future]
                    try:
                        results[file_path] = future.result()
                    except Exception as e:
                        logger.error(f'计算文件 {file_path} 的哈希值时出错: {str(e)}')
                        results[file_path] = None

                    # Report batch progress.
                    completed_tasks += 1
                    if progress_callback:
                        progress_callback(completed_tasks, total_tasks)

                    # On cancellation, cancel all still-queued futures so they
                    # never start; running ones observe _cancelled and exit
                    # early.  (A bare shutdown(wait=False) here was
                    # ineffective: leaving the `with` block still performs a
                    # waiting shutdown and queued tasks would run anyway.)
                    if self._cancelled:
                        for pending in future_to_file:
                            pending.cancel()
                        break

            return results
        finally:
            self._is_calculating = False

    def _save_hash_cache(self):
        """Persist the hash cache to disk under the cache lock."""
        with self._lock:
            try:
                # Ensure the cache directory exists before writing.
                self._hash_cache_file.parent.mkdir(parents=True, exist_ok=True)
                # Dump the cache as JSON.
                with open(self._hash_cache_file, 'w') as f:
                    json.dump(self._hash_cache, f)
                logger.debug(f'哈希缓存已保存到 {self._hash_cache_file}')
            except Exception as e:
                logger.error(f'保存哈希缓存时出错: {str(e)}')

    def cancel_all(self):
        """Request cancellation of all in-progress hash computations."""
        self._cancelled = True
        logger.info('所有哈希计算任务已取消')

    @property
    def active_tasks(self):
        """Number of hash computations currently in flight."""
        with self._task_lock:
            return self._active_tasks


if __name__ == '__main__':
    # Smoke test for the parallel hash calculator.
    import tempfile
    import time

    def progress_callback(file_path, processed, total):
        # Per-file progress: print bytes processed as a percentage.
        percent = (processed / total) * 100 if total > 0 else 0
        print(f'计算中: {file_path} - {processed}/{total} bytes ({percent:.1f}%)')

    def batch_progress_callback(completed, total):
        # Batch progress: print completed files as a percentage.
        percent = (completed / total) * 100 if total > 0 else 0
        print(f'批处理进度: {completed}/{total} 文件 ({percent:.1f}%)')

    # Create the test files inside a temporary directory so they are removed
    # automatically even if the test aborts part-way (the original left the
    # files in the CWD and hid cleanup errors behind a bare `except: pass`).
    with tempfile.TemporaryDirectory() as tmp_dir:
        test_files = []
        for i in range(5):
            file_name = os.path.join(tmp_dir, f'test_file_{i}.txt')
            with open(file_name, 'w') as f:
                f.write(f'This is test file {i} ' * 1000)  # repeated payload
            test_files.append(file_name)

        # Single-file hash computation.
        print('测试单个文件哈希计算:')
        calculator = ParallelHashCalculator(algorithm='md5', progress_callback=progress_callback)
        start_time = time.time()
        hash_value = calculator.compute_file_hash(test_files[0])
        end_time = time.time()
        print(f'文件 {test_files[0]} 的哈希值: {hash_value}')
        print(f'耗时: {end_time - start_time:.4f} 秒')

        # Parallel multi-file hash computation.
        print('\n测试多个文件并行哈希计算:')
        start_time = time.time()
        results = calculator.compute_file_hashes(test_files, batch_progress_callback)
        end_time = time.time()
        for file_path, hash_value in results.items():
            print(f'文件 {file_path} 的哈希值: {hash_value}')
        print(f'总耗时: {end_time - start_time:.4f} 秒')

    print('\n并行哈希计算器测试完成')

# Performance notes:
# 1. ThreadPoolExecutor drives the parallel hash computation
# 2. Thread-safe caching avoids recomputing unchanged files
# 3. Progress callbacks cover both single-file and batch progress
# 4. In-progress computations can be cancelled
# 5. The cache size is bounded to prevent unbounded memory growth
# 6. Locks guard all access to shared state
# 7. The worker-thread count adapts to the CPU core count
# 8. Exception handling and resource cleanup are explicit
# 9. An active-task counter is exposed for monitoring
# 10. Large files are read in chunks to bound memory usage