#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
多线程相似度比对模块
用于处理大量目录的相似度比对任务
"""

import os
import logging
import math
from typing import List, Dict, Any, Optional, Callable, Tuple
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import time

logger = logging.getLogger(__name__)

# Performance tuning constants: batch sizes chosen by dataset scale.
BATCH_SIZE_SMALL = 200   # batch size for small datasets
BATCH_SIZE_MEDIUM = 500  # batch size for medium datasets
BATCH_SIZE_LARGE = 1000  # batch size for large datasets
MAX_COMPARISONS_PER_BATCH = 100000  # maximum pairwise comparisons per batch

# BUG FIX: removed a duplicate `logger = logging.getLogger(__name__)` —
# the module logger is already defined once near the top of the file.


class MultiThreadSimilarityMatcher:
    """Multi-threaded directory similarity matcher.

    Compares directory names (and optionally sizes) pairwise, then merges
    similar pairs into groups via union-find. Dispatches between a
    single-thread, multi-thread, or batched multi-thread strategy based
    on the number of pairwise comparisons.
    """

    def __init__(self, max_workers: Optional[int] = None):
        # Size the thread pool dynamically: 2x CPU count, capped at 16 threads.
        self.max_workers = max_workers or min(16, (os.cpu_count() or 1) * 2)

    def find_similar_directories(self, 
                               directories: List[Dict[str, Any]], 
                               progress_callback: Optional[Callable[[int, int], None]] = None,
                               status_callback: Optional[Callable[[str], None]] = None,
                               compare_size: bool = True, 
                               similarity_threshold: float = 95.0) -> List[List[Dict[str, Any]]]:
        """
        Run the similarity comparison using the most suitable strategy.

        :param directories: directory info dicts; each must have 'path' and
            'size', and may have 'name' (defaults to the path's basename)
        :param progress_callback: called as (processed, total) for progress
        :param status_callback: called with a human-readable status string
        :param compare_size: if True, only same-size directories are compared
        :param similarity_threshold: percentage (0-100) two names must reach
            to be considered similar
        :return: list of groups, each a list of similar directory info dicts
        """
        total_dirs = len(directories)

        # Fewer than two directories: nothing to compare, return empty.
        if total_dirs < 2:
            logger.info(f'目录数量不足2个（{total_dirs}个），无法进行相似度比较')
            if status_callback:
                status_callback(f"目录数量不足2个，无法进行比较")
            return []

        # Total number of unordered pairs: n * (n - 1) / 2.
        total_comparisons = total_dirs * (total_dirs - 1) // 2

        # Report the initial status.
        if status_callback:
            status_callback(f"准备处理 {total_dirs} 个目录，共 {total_comparisons:,} 次比较")

        # Optimization 1: pick a batch size from directory count and the
        # expected comparison count (only used by the batched strategy).
        batch_size = self._determine_optimal_batch_size(total_dirs, total_comparisons)

        # Choose the processing strategy from the workload size.
        if total_dirs <= 100:
            mode = "单线程处理（小数据集优化）"
            if status_callback:
                status_callback(f"使用{mode}")
            logger.info(f"使用{mode}，目录数: {total_dirs}")
            return self._single_thread_processing(directories, progress_callback, status_callback, compare_size, similarity_threshold)
        elif total_comparisons <= MAX_COMPARISONS_PER_BATCH:
            mode = f"多线程处理（中等数据集优化），线程数: {self.max_workers}"
            if status_callback:
                status_callback(f"使用{mode}")
            logger.info(f"使用{mode}，比较次数: {total_comparisons}")
            return self._multi_thread_processing(directories, progress_callback, status_callback, compare_size, similarity_threshold)
        else:
            mode = f"分批多线程处理（大数据集优化），批大小: {batch_size}"
            if status_callback:
                status_callback(f"使用{mode}")
            logger.info(f"使用{mode}，总目录数: {total_dirs}，总比较次数: {total_comparisons}")
            return self._batch_multi_thread_processing(directories, progress_callback, status_callback, batch_size, compare_size, similarity_threshold)

    def _determine_optimal_batch_size(self, total_dirs: int, total_comparisons: int) -> int:
        """
        Determine the best batch size from the workload size.

        :param total_dirs: total number of directories
        :param total_comparisons: total number of pairwise comparisons
        :return: batch size to use for batched processing
        """
        if total_comparisons <= 10000:
            return BATCH_SIZE_SMALL
        elif total_comparisons <= 500000:
            return BATCH_SIZE_MEDIUM
        else:
            # For very large datasets, compute a batch size so that one
            # batch yields at most MAX_COMPARISONS_PER_BATCH comparisons:
            # k*(k-1)/2 <= MAX  =>  k ~ sqrt(2 * MAX).
            optimal_size = int(math.sqrt(2 * MAX_COMPARISONS_PER_BATCH))
            return min(max(optimal_size, BATCH_SIZE_LARGE), total_dirs)

    def _single_thread_processing(self, directories: List[Dict[str, Any]], 
                                 progress_callback: Optional[Callable[[int, int], None]],
                                 status_callback: Optional[Callable[[str], None]],
                                 compare_size: bool, similarity_threshold: float) -> List[List[Dict[str, Any]]]:
        """Single-threaded processing (for small datasets).

        Compares every pair directly, avoiding thread-pool overhead.
        """
        # NOTE(review): is_similar_by_threshold is imported but never used here.
        from string_similarity import calculate_directory_name_similarity, is_similar_by_threshold
        
        total_dirs = len(directories)
        processed_comparisons = 0
        total_comparisons = total_dirs * (total_dirs - 1) // 2
        
        logger.info(f'使用单线程处理 {total_dirs} 个目录，{total_comparisons} 次比较')
        
        # Optimization 2: pre-compute each directory's display name once.
        processed_dirs = []
        for dir_info in directories:
            name = dir_info.get('name', os.path.basename(dir_info['path']))
            processed_dirs.append({
                'info': dir_info,
                'name': name,
                'size': dir_info['size']
            })
        
        similar_pairs = []
        
        for i in range(total_dirs):
            # Update the status roughly every 10% of the outer loop.
            if status_callback and i % max(1, total_dirs // 10) == 0:
                status_callback(f"处理目录 {i+1}/{total_dirs}")
                
            for j in range(i + 1, total_dirs):
                dir1, dir2 = processed_dirs[i], processed_dirs[j]
                processed_comparisons += 1
                
                # Report progress for every comparison (cheap in-process call).
                if progress_callback:
                    progress_callback(processed_comparisons, total_comparisons)
                
                # Cheapest filter first: mismatched sizes can never match.
                if compare_size and dir1['size'] != dir2['size']:
                    continue
                
                # Optimization 3: skip pairs whose name lengths differ too
                # much to ever reach the similarity threshold.
                if abs(len(dir1['name']) - len(dir2['name'])) > max(len(dir1['name']), len(dir2['name'])) * (1 - similarity_threshold / 100):
                    continue
                
                # Full similarity computation for the remaining candidates.
                similarity = calculate_directory_name_similarity(dir1['name'], dir2['name'])
                if similarity >= similarity_threshold:
                    similar_pairs.append((i, j, similarity))
                    logger.debug(f'找到相似目录: "{dir1["name"]}" vs "{dir2["name"]}", 相似度: {similarity:.2f}%')
        
        return self._build_similarity_groups(directories, similar_pairs)

    def _multi_thread_processing(self, directories: List[Dict[str, Any]], 
                                progress_callback: Optional[Callable[[int, int], None]],
                                status_callback: Optional[Callable[[str], None]],
                                compare_size: bool, similarity_threshold: float) -> List[List[Dict[str, Any]]]:
        """Multi-threaded processing (for medium datasets).

        Pre-filters obviously dissimilar pairs on the main thread, then
        submits only the surviving pairs to a thread pool.
        """
        total_dirs = len(directories)
        total_comparisons = total_dirs * (total_dirs - 1) // 2
        
        logger.info(f'使用多线程处理 {total_dirs} 个目录，{total_comparisons} 次比较，线程数: {self.max_workers}')
        
        # Optimization 4: pre-compute names and sizes to avoid recomputation.
        processed_dirs = []
        for dir_info in directories:
            name = dir_info.get('name', os.path.basename(dir_info['path']))
            processed_dirs.append({
                'info': dir_info,
                'name': name,
                'size': dir_info['size']
            })
        
        # Thread-safe progress counter (dict so the closure can mutate it).
        progress_lock = threading.Lock()
        processed_comparisons = {'count': 0}
        
        # Optimization 5: throttle progress/status callback invocations.
        last_progress_update = 0
        def update_progress():
            # Called once per pair, whether the pair was pre-filtered out
            # here or actually compared on a worker thread.
            nonlocal last_progress_update
            with progress_lock:
                processed_comparisons['count'] += 1
                current = processed_comparisons['count']
                
                # Invoke the progress callback every ~1% or every 1000 pairs.
                if progress_callback and (current % max(1, total_comparisons // 100) == 0 or 
                                          current % 1000 == 0 or 
                                          current == total_comparisons):
                    progress_callback(current, total_comparisons)
                    
                # Invoke the status callback every ~10% of progress.
                progress_percent = current * 100 // total_comparisons
                if status_callback and progress_percent >= last_progress_update + 10:
                    status_callback(f"正在比对... {progress_percent}% 完成")
                    last_progress_update = progress_percent
        
        similar_pairs = []
        pairs_lock = threading.Lock()
        
        def add_similar_pair(pair_data):
            # Guard the shared result list against concurrent appends.
            with pairs_lock:
                similar_pairs.append(pair_data)
        
        # Build the task list, pre-filtering on the main thread.
        tasks = []
        for i in range(total_dirs):
            for j in range(i + 1, total_dirs):
                # Optimization 6: drop pairs with mismatched sizes up front.
                if compare_size and processed_dirs[i]['size'] != processed_dirs[j]['size']:
                    update_progress()  # skipped pairs still count toward progress
                    continue
                
                # Optimization 7: drop pairs whose name-length gap is too
                # large to ever reach the similarity threshold.
                len_diff = abs(len(processed_dirs[i]['name']) - len(processed_dirs[j]['name']))
                max_len = max(len(processed_dirs[i]['name']), len(processed_dirs[j]['name']))
                if max_len > 0 and len_diff > max_len * (1 - similarity_threshold / 100):
                    update_progress()  # skipped pairs still count toward progress
                    continue
                
                # Only potentially-similar pairs reach the thread pool.
                tasks.append((i, j))
        
        # Submit the surviving pairs to the thread pool.
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_pair = {}
            batch_start_time = time.time()
            skipped_count = 0
            
            for i, j in tasks:
                future = executor.submit(
                    self._compare_directories_similarity,
                    processed_dirs[i]['info'], processed_dirs[j]['info'], i, j,
                    False, similarity_threshold,  # size already checked above, so pass False
                    update_progress
                )
                future_to_pair[future] = (i, j)
                
                # Periodic submission logging. NOTE(review): 'current' is only
                # an estimate — worker threads may be advancing the counter
                # concurrently while tasks are still being submitted.
                current = processed_comparisons['count'] + len(future_to_pair)
                if current % 1000 == 0 or current == total_comparisons:
                    elapsed_time = time.time() - batch_start_time
                    comparisons_per_second = current / elapsed_time if elapsed_time > 0 else 0
                    logger.info(f"已提交 {current}/{total_comparisons} 个比较任务 ({current/total_comparisons*100:.1f}%)，速度: {comparisons_per_second:.1f} 任务/秒")
            
            # Collect results as workers finish.
            for future in as_completed(future_to_pair):
                try:
                    result = future.result()
                    if result is not None:
                        add_similar_pair(result)
                except Exception as e:
                    logger.error(f'多线程比较出错: {str(e)}', exc_info=True)
                    
            # Log how many pairs were filtered out before submission.
            skipped_count = total_comparisons - len(future_to_pair)
            if skipped_count > 0:
                logger.info(f"因提前过滤跳过 {skipped_count} 次比较 ({skipped_count/total_comparisons*100:.1f}%)")
        
        # Force progress to 100% in case throttling skipped the final update.
        if progress_callback:
            progress_callback(total_comparisons, total_comparisons)
            
        if status_callback:
            status_callback(f"比对完成，找到 {len(similar_pairs)} 对相似目录")
            
        logger.info(f'多线程处理完成，找到 {len(similar_pairs)} 个相似对')
        return self._build_similarity_groups(directories, similar_pairs)

    def _batch_multi_thread_processing(self, directories: List[Dict[str, Any]], 
                                      progress_callback: Optional[Callable[[int, int], None]],
                                      status_callback: Optional[Callable[[str], None]],
                                      batch_size: int, compare_size: bool, similarity_threshold: float) -> List[List[Dict[str, Any]]]:
        """Batched multi-threaded processing (for large datasets).

        When size comparison is enabled, directories are first partitioned
        by size (only equal-size directories can match), then each partition
        is processed in batches. NOTE(review): directories in the same size
        group but different batches are never compared with each other, so
        cross-batch matches can be missed.
        """
        total_dirs = len(directories)
        all_groups = []
        
        logger.info(f'使用分批多线程处理 {total_dirs} 个目录，批大小: {batch_size}')
        
        # Optimization 8: group by size first to reduce cross-batch comparisons.
        if compare_size:
            size_groups = {}
            for dir_info in directories:
                size = dir_info['size']
                if size not in size_groups:
                    size_groups[size] = []
                size_groups[size].append(dir_info)
            
            # Process each size group independently.
            batch_idx = 0
            total_size_groups = len(size_groups)
            
            for size, size_dirs in size_groups.items():
                batch_idx += 1
                
                if status_callback:
                    status_callback(f"处理大小组 {batch_idx}/{total_size_groups} (大小: {size} bytes)")
                    
                # Split the size group itself into batches.
                size_total_dirs = len(size_dirs)
                size_total_batches = (size_total_dirs + batch_size - 1) // batch_size
                
                for size_batch_idx in range(size_total_batches):
                    start_idx = size_batch_idx * batch_size
                    end_idx = min(start_idx + batch_size, size_total_dirs)
                    batch_dirs = size_dirs[start_idx:end_idx]
                    batch_size_actual = len(batch_dirs)
                    
                    batch_start_time = time.time()
                    
                    if status_callback:
                        status_callback(f"处理大小组批次 {size_batch_idx + 1}/{size_total_batches}: 目录 {start_idx+1}-{end_idx}")
                    logger.info(f'处理大小组批次 {size_batch_idx + 1}/{size_total_batches}: 目录 {start_idx+1}-{end_idx} ({batch_size_actual} 个目录)')
                    
                    try:
                        # Per-batch progress callback. Capturing batch_idx is
                        # safe here because batches run to completion before
                        # the loop variable changes (sequential execution).
                        def batch_progress(processed, total):
                            # Map batch-local progress onto a rough global
                            # figure. NOTE(review): this is approximate — it
                            # combines the size-group index with batch_size,
                            # so the reported fraction can drift.
                            if progress_callback:
                                global_progress = (batch_idx - 1) * batch_size + processed
                                global_total = total_dirs
                                progress_callback(global_progress, global_total)

                        # Process the batch; compare_size is False because
                        # these directories are already grouped by size.
                        batch_groups = self._multi_thread_processing(
                            batch_dirs, batch_progress, status_callback, False, similarity_threshold
                        )
                        all_groups.extend(batch_groups)
                        
                        # Brief pause between batches to avoid CPU saturation.
                        if size_batch_idx < size_total_batches - 1:
                            time.sleep(0.05)  # shorter rest for size-grouped batches
                            
                        batch_end_time = time.time()
                        batch_duration = batch_end_time - batch_start_time
                        logger.info(f'大小组批次 {size_batch_idx + 1} 处理完成，耗时: {batch_duration:.2f}秒，找到 {len(batch_groups)} 组相似目录')
                    except Exception as e:
                        logger.error(f'大小组批次 {size_batch_idx + 1} 处理失败: {str(e)}', exc_info=True)
                        if status_callback:
                            status_callback(f'大小组批次 {size_batch_idx + 1} 处理失败: {str(e)}')
        else:
            # Size comparison disabled: plain sequential batching over the
            # original directory order.
            total_batches = (total_dirs + batch_size - 1) // batch_size
            
            for batch_idx in range(total_batches):
                start_idx = batch_idx * batch_size
                end_idx = min(start_idx + batch_size, total_dirs)
                batch_dirs = directories[start_idx:end_idx]
                batch_size_actual = len(batch_dirs)
                
                batch_start_time = time.time()
                
                if status_callback:
                    status_callback(f"处理批次 {batch_idx + 1}/{total_batches}: 目录 {start_idx+1}-{end_idx}")
                    
                logger.info(f'处理批次 {batch_idx + 1}/{total_batches}: 目录 {start_idx+1}-{end_idx} ({batch_size_actual} 个目录)')
                
                try:
                    # Per-batch progress callback (batches run sequentially,
                    # so capturing batch_idx here is safe).
                    def batch_progress(processed, total):
                        # Map batch-local progress onto the global scale.
                        if progress_callback:
                            global_progress = batch_idx * batch_size + processed
                            global_total = total_dirs
                            progress_callback(global_progress, global_total)
                    
                    # Process the current batch.
                    batch_groups = self._multi_thread_processing(
                        batch_dirs, batch_progress, status_callback, compare_size, similarity_threshold
                    )
                    all_groups.extend(batch_groups)
                    
                    # Brief pause between batches to avoid CPU saturation.
                    if batch_idx < total_batches - 1:
                        time.sleep(0.1)
                        
                    batch_end_time = time.time()
                    batch_duration = batch_end_time - batch_start_time
                    logger.info(f'批次 {batch_idx + 1} 处理完成，耗时: {batch_duration:.2f}秒，找到 {len(batch_groups)} 组相似目录')
                except Exception as e:
                    logger.error(f'批次 {batch_idx + 1} 处理失败: {str(e)}', exc_info=True)
                    if status_callback:
                        status_callback(f'批次 {batch_idx + 1} 处理失败: {str(e)}')
        
        if status_callback:
            status_callback(f"分批处理完成，共找到 {len(all_groups)} 组相似目录")
            
        logger.info(f'分批处理完成，共找到 {len(all_groups)} 组相似目录')
        return all_groups

    def _compare_directories_similarity(self, 
                                      dir1: Dict[str, Any], 
                                      dir2: Dict[str, Any], 
                                      i: int, j: int,
                                      compare_size: bool, 
                                      similarity_threshold: float,
                                      progress_callback: Callable) -> Optional[Tuple[int, int, float]]:
        """Compare two directories' similarity (runs on worker threads).

        :param dir1: first directory info dict
        :param dir2: second directory info dict
        :param i: index of dir1 in the caller's directory list
        :param j: index of dir2 in the caller's directory list
        :param compare_size: if True, reject pairs with different sizes
        :param similarity_threshold: similarity percentage required to match
        :param progress_callback: zero-arg callable that advances the shared,
            lock-protected progress counter
        :return: (i, j, similarity) when the threshold is met, else None;
            also returns None on any internal error (logged, never raised)
        """
        try:
            from string_similarity import calculate_directory_name_similarity
            
            # Advance the shared progress counter (thread-safe closure).
            progress_callback()
            
            # Size check first (callers that pre-filter by size pass
            # compare_size=False to skip the redundant check).
            if compare_size and dir1['size'] != dir2['size']:
                return None
            
            # Optimization 9: prefer the precomputed 'name' key; fall back
            # to the path basename only when it is absent.
            dir1_name = dir1.get('name', os.path.basename(dir1['path']))
            dir2_name = dir2.get('name', os.path.basename(dir2['path']))
            
            # Optimization 10: compute the similarity once and compare it
            # directly against the threshold (no separate is-similar call).
            similarity = calculate_directory_name_similarity(dir1_name, dir2_name)
            
            if similarity >= similarity_threshold:
                logger.debug(f'找到相似目录: "{dir1_name}" vs "{dir2_name}", 相似度: {similarity:.2f}%')
                return (i, j, similarity)
            
            return None
            
        except Exception as e:
            logger.error(f'比较目录相似度时出错: {str(e)}')
            return None

    def _build_similarity_groups(self, directories: List[Dict[str, Any]], similar_pairs: List[Tuple[int, int, float]]) -> List[List[Dict[str, Any]]]:
        """Build similarity groups from pairs using union-find.

        :param directories: full directory list (indexed by the pair indices)
        :param similar_pairs: (i, j, similarity) tuples of similar indices
        :return: groups with at least two members, each sorted by path
        """
        if not similar_pairs:
            return []
        
        # Union-find structure over directory indices.
        parent = list(range(len(directories)))
        
        def find(x):
            # Find the root representative, with path compression.
            if parent[x] != x:
                parent[x] = find(parent[x])
            return parent[x]
        
        def union(x, y):
            # Merge the two components (no rank/size heuristic).
            px, py = find(x), find(y)
            if px != py:
                parent[px] = py
        
        # Optimization 11: merge higher-similarity pairs first.
        # NOTE(review): with plain union-find the resulting connected
        # components are identical regardless of merge order, so this sort
        # affects only processing order, not the final grouping.
        similar_pairs.sort(key=lambda x: x[2], reverse=True)
        
        # Merge every similar pair into its component.
        for i, j, similarity in similar_pairs:
            union(i, j)
        
        # Collect the members of each connected component.
        groups = {}
        for i in range(len(directories)):
            root = find(i)
            if root not in groups:
                groups[root] = []
            groups[root].append(directories[i])
        
        # Keep only groups containing two or more directories.
        result_groups = [group for group in groups.values() if len(group) >= 2]
        
        # Optimization 12: give each group a deterministic ordering.
        for group in result_groups:
            # Pairwise similarities within a group are not retained, so
            # sort by path for a stable, readable ordering instead.
            group.sort(key=lambda d: d.get('path', ''))
            
            # Log compactly: truncate the listing for groups larger than 5.
            group_size = len(group)
            group_names = [d.get('name', os.path.basename(d['path'])) for d in group]
            
            if group_size > 5:
                logger.info(f"构建相似目录组（{group_size} 个）: {group_names[0]}, {group_names[1]}, ... 等 {group_size - 2} 个目录")
            else:
                logger.info(f"构建相似目录组（{group_size} 个）: {', '.join(group_names)}")
        
        return result_groups


# 测试代码
# Self-test / demo entry point
if __name__ == '__main__':
    # Build synthetic test data: 50 distinct directories.
    test_dirs = []
    for i in range(50):
        test_dirs.append({
            'path': f'/test/dir_{i}',
            'name': f'test_dir_{i}',
            'size': 1000 + i,
            'type': 'directory'
        })
    
    # Add a few deliberately similar directories (sizes match in pairs so
    # they survive the size filter).
    test_dirs.extend([
        {'path': '/test/Photos', 'name': 'Photos', 'size': 5000, 'type': 'directory'},
        {'path': '/test/Photos v2.0', 'name': 'Photos v2.0', 'size': 5000, 'type': 'directory'},
        {'path': '/test/Music Files', 'name': 'Music Files', 'size': 3000, 'type': 'directory'},
        {'path': '/test/Music_Files', 'name': 'Music_Files', 'size': 3000, 'type': 'directory'},
    ])
    
    print(f"测试 {len(test_dirs)} 个目录")
    
    def progress_callback(processed, total):
        # Print progress every 100 comparisons and at completion.
        if processed % 100 == 0 or processed == total:
            print(f"进度: {processed}/{total} ({processed/total*100:.1f}%)")
    
    # Exercise the multi-threaded similarity matcher.
    matcher = MultiThreadSimilarityMatcher()
    start_time = time.time()
    
    # BUG FIX: the original call passed True and 95.0 positionally, which
    # bound True to status_callback (a non-callable, raising TypeError on
    # the first status update) and 95.0 to compare_size. Bind them to the
    # intended keyword parameters instead.
    groups = matcher.find_similar_directories(
        test_dirs, progress_callback,
        compare_size=True, similarity_threshold=95.0
    )
    
    end_time = time.time()
    
    print(f"\n测试完成，耗时: {end_time - start_time:.2f}秒")
    print(f"找到 {len(groups)} 组相似目录:")
    
    for i, group in enumerate(groups):
        group_names = [d['name'] for d in group]
        print(f"  组 {i+1}: {', '.join(group_names)}")