# -*- coding: utf-8 -*-
"""
重复检测器
负责检测视频文件中的重复项
"""
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple

from .models import VideoInfo, VideoType
from .parser import VideoNameParser
from .video_group import VideoGroup, VideoVariantGroup


class DuplicateDetector:
    """Duplicate detector.

    Detects duplicate entries in lists of video filenames by parsing each
    name into a VideoInfo and grouping the results by base video ID.
    """
    
    def __init__(self):
        # Parser that turns raw filenames into VideoInfo objects.
        self.parser = VideoNameParser()
    
    def detect_single_file_duplicates(self, filenames: List[str]) -> Dict[str, Any]:
        """
        Detect duplicate entries within a single list of filenames.

        :param filenames: list of video filenames to analyze
        :return: result dict with file totals, duplicate VideoGroups,
                 dirty (unparseable) entries, all parsed entries,
                 the full group mapping, and summary stats
        """
        # Parse every filename into a VideoInfo.
        parsed_videos = [self.parser.parse(filename) for filename in filenames]
        
        # Separate unparseable ("dirty") entries from clean ones.
        clean_videos = [v for v in parsed_videos if not v.is_dirty]
        dirty_videos = [v for v in parsed_videos if v.is_dirty]
        
        # Group clean entries by their base video ID.
        video_groups = self._group_by_base_id(clean_videos)
        
        # Keep only the groups that actually contain duplicates.
        duplicates = {base_id: group for base_id, group in video_groups.items() if group.has_duplicates}
        
        # Build summary statistics.
        stats = self._generate_stats_new(clean_videos, dirty_videos, duplicates)
        
        return {
            'total_files': len(filenames),
            'clean_files': len(clean_videos),
            'dirty_files': len(dirty_videos),
            'duplicates': duplicates,
            'dirty_data': dirty_videos,
            'stats': stats,
            'parsed_videos': clean_videos,
            'video_groups': video_groups
        }
    
    def detect_cross_file_duplicates(self, file1_names: List[str], file2_names: List[str]) -> Dict[str, Any]:
        """
        Detect duplicate entries between two lists of filenames.

        :param file1_names: first list of filenames
        :param file2_names: second list of filenames
        :return: result dict with a per-file summary (totals, internal
                 duplicates, dirty data, group mapping), the cross-file
                 duplicates, and overall stats
        """
        # Parse both filename lists into VideoInfo entries.
        file1_videos = [self.parser.parse(filename) for filename in file1_names]
        file2_videos = [self.parser.parse(filename) for filename in file2_names]
        
        # Separate unparseable ("dirty") entries on each side.
        file1_clean = [v for v in file1_videos if not v.is_dirty]
        file1_dirty = [v for v in file1_videos if v.is_dirty]
        file2_clean = [v for v in file2_videos if not v.is_dirty]
        file2_dirty = [v for v in file2_videos if v.is_dirty]
        
        # Build a VideoGroup structure for each file independently.
        file1_groups = self._group_by_base_id(file1_clean)
        file2_groups = self._group_by_base_id(file2_clean)
        
        # Internal duplicates within each file (new VideoGroup architecture).
        file1_internal_dups = {base_id: group for base_id, group in file1_groups.items() if group.has_duplicates}
        file2_internal_dups = {base_id: group for base_id, group in file2_groups.items() if group.has_duplicates}
        
        # Duplicates that span both files (new VideoGroup architecture).
        cross_duplicates = self._find_cross_duplicates_new(file1_groups, file2_groups)
        
        # Build summary statistics.
        stats = self._generate_cross_stats_new(
            file1_clean, file1_dirty, file2_clean, file2_dirty,
            file1_internal_dups, file2_internal_dups, cross_duplicates
        )
        
        return {
            'file1': {
                'total_files': len(file1_names),
                'clean_files': len(file1_clean),
                'dirty_files': len(file1_dirty),
                'internal_duplicates': file1_internal_dups,
                'dirty_data': file1_dirty,
                'video_groups': file1_groups
            },
            'file2': {
                'total_files': len(file2_names),
                'clean_files': len(file2_clean),
                'dirty_files': len(file2_dirty),
                'internal_duplicates': file2_internal_dups,
                'dirty_data': file2_dirty,
                'video_groups': file2_groups
            },
            'cross_duplicates': cross_duplicates,
            'stats': stats
        }
    
    def _find_duplicates(self, videos: List[VideoInfo]) -> Dict[str, List[VideoInfo]]:
        """
        Find duplicate entries within a list of videos.

        :param videos: parsed video entries
        :return: mapping of core video ID -> its duplicated VideoInfo entries
        """
        # Bucket the entries by core ID, preserving insertion order.
        buckets: Dict[str, List[VideoInfo]] = {}
        for info in videos:
            buckets.setdefault(info.clean_core_id, []).append(info)
        
        # A core ID is duplicated when more than one entry maps to it.
        # Order each group so whole files come before segments, and
        # segments are sorted by their CD number.
        return {
            core_id: sorted(group, key=lambda v: (v.is_segment, v.segment_number or '0'))
            for core_id, group in buckets.items()
            if len(group) > 1
        }
    
    def _find_cross_duplicates(self, file1_videos: List[VideoInfo], file2_videos: List[VideoInfo]) -> Dict[str, Dict[str, List[VideoInfo]]]:
        """
        Find duplicate entries shared between two video lists.

        :param file1_videos: video entries from the first file
        :param file2_videos: video entries from the second file
        :return: mapping of core video ID -> {'file1': [...], 'file2': [...]}
        """
        def index_by_core_id(entries):
            # Bucket entries by core ID, preserving insertion order.
            mapping: Dict[str, List[VideoInfo]] = {}
            for entry in entries:
                mapping.setdefault(entry.clean_core_id, []).append(entry)
            return mapping
        
        left = index_by_core_id(file1_videos)
        right = index_by_core_id(file2_videos)
        
        # Only core IDs present on both sides count as cross-file duplicates.
        return {
            core_id: {'file1': left[core_id], 'file2': right[core_id]}
            for core_id in left.keys() & right.keys()
        }
    
    def _find_cross_duplicates_new(self, file1_groups: Dict[str, VideoGroup], file2_groups: Dict[str, VideoGroup]) -> Dict[str, Dict[str, VideoGroup]]:
        """
        Find duplicates shared between two files (VideoGroup architecture).

        :param file1_groups: base ID -> VideoGroup mapping for the first file
        :param file2_groups: base ID -> VideoGroup mapping for the second file
        :return: mapping of base ID -> {'file1_group': ..., 'file2_group': ...}
        """
        # Base IDs that appear in both files are cross-file duplicates.
        shared_ids = file1_groups.keys() & file2_groups.keys()
        return {
            base_id: {
                'file1_group': file1_groups[base_id],
                'file2_group': file2_groups[base_id]
            }
            for base_id in shared_ids
        }
    
    def _group_by_base_id(self, videos: List[VideoInfo]) -> Dict[str, VideoGroup]:
        """
        Group video entries by their base video ID.

        :param videos: parsed video entries
        :return: mapping of base ID -> VideoGroup holding its videos
        """
        groups: Dict[str, VideoGroup] = {}
        
        for info in videos:
            key = info.clean_core_id  # the base video ID
            group = groups.get(key)
            if group is None:
                # First entry for this ID decides the group's video type.
                group = VideoGroup(base_id=key, video_type=info.video_type)
                groups[key] = group
            group.add_video(info)
        
        return groups
    
    def _generate_stats_new(self, clean_videos: List[VideoInfo], dirty_videos: List[VideoInfo], duplicates: Dict[str, VideoGroup]) -> Dict[str, Any]:
        """Build summary statistics for a single-file scan (VideoGroup-based).

        :param clean_videos: successfully parsed entries
        :param dirty_videos: entries that failed parsing
        :param duplicates: base ID -> VideoGroup for groups with duplicates
        :return: stats dict (type distribution, unique/duplicate counts)
        """
        # Count clean entries per video type.
        type_counts = defaultdict(int)
        for video in clean_videos:
            type_counts[video.video_type.value] += 1
        
        # Files involved in any duplicate group vs. the remainder.
        duplicate_files = sum(group.total_files for group in duplicates.values())
        unique_files = len(clean_videos) - duplicate_files
        
        return {
            'type_distribution': dict(type_counts),
            'unique_files': unique_files,
            'duplicate_files': duplicate_files,
            'duplicate_groups': len(duplicates),
            'dirty_files': len(dirty_videos)
        }
    
    def _generate_stats(self, clean_videos: List[VideoInfo], dirty_videos: List[VideoInfo], duplicates: Dict[str, List[VideoInfo]]) -> Dict[str, Any]:
        """Build single-file summary statistics (legacy list-based method).

        :param clean_videos: successfully parsed entries
        :param dirty_videos: entries that failed parsing
        :param duplicates: core ID -> list of duplicated entries
        :return: stats dict (type distribution, unique/duplicate counts)
        """
        # Count clean entries per video type.
        type_counts = defaultdict(int)
        for video in clean_videos:
            type_counts[video.video_type.value] += 1
        
        # Files involved in any duplicate group vs. the remainder.
        duplicate_count = sum(len(video_list) for video_list in duplicates.values())
        unique_count = len(clean_videos) - duplicate_count
        
        return {
            'type_distribution': dict(type_counts),
            'unique_files': unique_count,
            'duplicate_files': duplicate_count,
            'duplicate_groups': len(duplicates),
            'dirty_files': len(dirty_videos)
        }
    
    def _generate_cross_stats_new(self, file1_clean: List[VideoInfo], file1_dirty: List[VideoInfo],
                                 file2_clean: List[VideoInfo], file2_dirty: List[VideoInfo],
                                 file1_dups: Dict[str, VideoGroup], file2_dups: Dict[str, VideoGroup], 
                                 cross_dups: Dict[str, Dict[str, VideoGroup]]) -> Dict[str, Any]:
        """Build summary statistics for a cross-file scan (VideoGroup-based).

        :param file1_clean: parsed entries from file 1
        :param file1_dirty: unparseable entries from file 1 (kept for
                            signature symmetry; not used in the totals)
        :param file2_clean: parsed entries from file 2
        :param file2_dirty: unparseable entries from file 2 (unused, as above)
        :param file1_dups: file 1's internal duplicate groups
        :param file2_dups: file 2's internal duplicate groups
        :param cross_dups: base ID -> {'file1_group': ..., 'file2_group': ...}
        :return: stats dict (cross-duplicate and per-file unique counts)
        """
        # Total number of files involved in cross-file duplicate groups.
        cross_duplicate_files = sum(
            dup['file1_group'].total_files + dup['file2_group'].total_files
            for dup in cross_dups.values()
        )
        
        # Files involved in each file's own internal duplicate groups.
        file1_duplicate_files = sum(group.total_files for group in file1_dups.values())
        file2_duplicate_files = sum(group.total_files for group in file2_dups.values())
        
        return {
            'total_cross_duplicates': len(cross_dups),
            'cross_duplicate_files': cross_duplicate_files,
            'file1_unique': len(file1_clean) - file1_duplicate_files,
            'file2_unique': len(file2_clean) - file2_duplicate_files,
            'file1_internal_duplicates': len(file1_dups),
            'file2_internal_duplicates': len(file2_dups)
        }

    def _generate_cross_stats(self, file1_clean: List[VideoInfo], file1_dirty: List[VideoInfo],
                             file2_clean: List[VideoInfo], file2_dirty: List[VideoInfo],
                             file1_dups: Dict, file2_dups: Dict, cross_dups: Dict) -> Dict[str, Any]:
        """Build cross-file summary statistics (legacy method, kept for compatibility).

        :param file1_clean: parsed entries from file 1
        :param file1_dirty: unparseable entries from file 1 (unused here)
        :param file2_clean: parsed entries from file 2
        :param file2_dirty: unparseable entries from file 2 (unused here)
        :param file1_dups: file 1's internal duplicates (core ID -> entry list)
        :param file2_dups: file 2's internal duplicates (core ID -> entry list)
        :param cross_dups: core ID -> {'file1': [...], 'file2': [...]}
        :return: stats dict (cross-duplicate and per-file unique counts)
        """
        return {
            'total_cross_duplicates': len(cross_dups),
            'cross_duplicate_files': sum(len(dup_info['file1']) + len(dup_info['file2']) for dup_info in cross_dups.values()),
            'file1_unique': len(file1_clean) - sum(len(video_list) for video_list in file1_dups.values()),
            'file2_unique': len(file2_clean) - sum(len(video_list) for video_list in file2_dups.values()),
            'file1_internal_duplicates': len(file1_dups),
            'file2_internal_duplicates': len(file2_dups)
        }