import numpy as np
import pandas as pd
import logging
import os
import time
from typing import Dict, List, Tuple, Optional, Callable
from multiprocessing import Pool, cpu_count
import gc
from functools import partial
from logger import logger


class HighPerformanceComparer:
    """High-performance frame-data comparer.

    Uses vectorized Pandas/NumPy operations instead of per-frame Python
    loops to speed up comparison of two decoded frame lists.
    """

    def __init__(self):
        # Shared application logger; handlers/levels are configured by the host app.
        self.logger = logging.getLogger('frame_analyzer')

    def compare_files_vectorized(self,
                                frames1: List[dict],
                                frames2: List[dict],
                                compare_range: Optional[Tuple[int, int]] = None,
                                progress_callback: Optional[Callable] = None,
                                use_multiprocessing: bool = True) -> Dict:
        """Compare two files' frame lists using vectorized operations.

        Args:
            frames1: frame list of the base file; each frame dict must have
                'frame_count' and 'payload', and may have 'virtual_channel'
                and 'decode_status' (both default to 0)
            frames2: frame list of the target file (same shape as frames1)
            compare_range: optional (start, end) range, forwarded to the
                per-channel workers
            progress_callback: optional callback(percent, stage, detail)
            use_multiprocessing: whether to spread virtual channels across
                worker processes (only used when more than one channel exists)

        Returns:
            Dict with 'total_frames', 'frame_results', 'matching_frames',
            'virtual_channel_results' and 'processing_time'.

        Raises:
            Exception: any failure is logged and re-raised.
        """
        try:
            start_time = time.time()

            # Rough memory estimate: ~2KB per frame (0.002 MB).
            total_frames = len(frames1) + len(frames2)
            estimated_memory_mb = total_frames * 0.002

            # Optional memory check; psutil may not be installed, in which
            # case the check is skipped entirely.
            try:
                import psutil
                memory = psutil.virtual_memory()
                available_mb = memory.available / (1024 * 1024)

                if available_mb < estimated_memory_mb * 1.5:  # want a 1.5x safety margin
                    self.logger.warning(f"内存可能不足: 需要约{estimated_memory_mb:.0f}MB，可用{available_mb:.0f}MB")
                    gc.collect()  # try to free memory first

                    # Severely short on memory: disable multiprocessing so the
                    # data is not duplicated into worker processes.
                    if available_mb < estimated_memory_mb:
                        use_multiprocessing = False
                        self.logger.warning("内存不足，禁用多进程处理")
            except ImportError:
                pass

            # Convert frame lists to indexed DataFrames for fast lookups.
            df1 = self._frames_to_dataframe(frames1)
            df2 = self._frames_to_dataframe(frames2)

            if progress_callback:
                progress_callback(10, "数据预处理", "转换为DataFrame格式")

            # Vectorized search for frame counts present in both files
            # (used only for the progress report here; the per-channel
            # workers recompute their own intersections).
            common_counts = self._find_common_frames_vectorized(df1, df2)

            if progress_callback:
                progress_callback(20, "查找共同帧", f"发现 {len(common_counts)} 个共同帧")

            # Group both frames by virtual channel.
            vc_groups1 = df1.groupby(level='virtual_channel')
            vc_groups2 = df2.groupby(level='virtual_channel')

            # All virtual-channel ids seen in either file (from the index).
            vc_ids1 = df1.index.get_level_values('virtual_channel').unique()
            vc_ids2 = df2.index.get_level_values('virtual_channel').unique()
            all_vc_ids = sorted(set(vc_ids1) | set(vc_ids2))

            self.logger.info(f"发现 {len(all_vc_ids)} 个虚拟信道")

            # Result skeleton, filled in from the per-channel results below.
            comparison_result = {
                'total_frames': [len(frames1), len(frames2)],
                'frame_results': [],
                'matching_frames': 0,
                'virtual_channel_results': {},
                'processing_time': 0
            }

            # The parallel path only pays off with more than one channel.
            if use_multiprocessing and len(all_vc_ids) > 1:
                vc_results = self._compare_virtual_channels_parallel(
                    vc_groups1, vc_groups2, all_vc_ids, compare_range, progress_callback
                )
            else:
                vc_results = self._compare_virtual_channels_sequential(
                    vc_groups1, vc_groups2, all_vc_ids, compare_range, progress_callback
                )

            # Merge per-channel results into the overall result.
            for vc_id, vc_result in vc_results.items():
                comparison_result['virtual_channel_results'][vc_id] = vc_result
                comparison_result['matching_frames'] += vc_result['matching_frames']
                comparison_result['frame_results'].extend(vc_result['frame_comparisons'])

            comparison_result['processing_time'] = time.time() - start_time

            if progress_callback:
                progress_callback(100, "比对完成",
                                f"总耗时: {comparison_result['processing_time']:.2f}秒")

            self.logger.info(f"高性能比对完成，耗时: {comparison_result['processing_time']:.2f}秒")

            return comparison_result

        except Exception as e:
            self.logger.error(f"高性能比对失败: {str(e)}")
            raise

    def _frames_to_dataframe(self, frames: List[dict]) -> pd.DataFrame:
        """Convert a frame list to an indexed DataFrame.

        Extracts the key fields and builds a (virtual_channel, frame_count)
        MultiIndex to make per-channel and per-count lookups fast.
        """
        data = []
        for idx, frame in enumerate(frames):
            data.append({
                'index': idx,
                'frame_count': int(frame['frame_count']),
                'virtual_channel': int(frame.get('virtual_channel', 0)),
                'decode_status': int(frame.get('decode_status', 0)),
                # Hash the payload once so later comparisons are O(1) per
                # frame. NOTE: bytes/str hashing is salted per process, so
                # these hashes are only comparable within one process run —
                # both inputs are hashed here in the same process.
                'payload_hash': hash(frame['payload'])
            })

        # Pass the column list explicitly so an empty frame list still yields
        # a DataFrame with the expected columns (without it, set_index raises
        # KeyError on the column-less empty frame).
        df = pd.DataFrame(data, columns=['index', 'frame_count', 'virtual_channel',
                                         'decode_status', 'payload_hash'])
        # Multi-level index for fast queries.
        df.set_index(['virtual_channel', 'frame_count'], inplace=True)
        df.sort_index(inplace=True)

        return df

    def _find_common_frames_vectorized(self, df1: pd.DataFrame, df2: pd.DataFrame) -> np.ndarray:
        """Return the sorted frame counts present in both DataFrames."""
        # Pull the frame_count level back out of the MultiIndex.
        counts1 = df1.reset_index()['frame_count'].unique()
        counts2 = df2.reset_index()['frame_count'].unique()

        # NumPy intersect1d performs the set intersection in C.
        common_counts = np.intersect1d(counts1, counts2)

        return common_counts

    def _compare_virtual_channels_parallel(self,
                                         vc_groups1,
                                         vc_groups2,
                                         all_vc_ids: List[int],
                                         compare_range: Optional[Tuple[int, int]],
                                         progress_callback: Optional[Callable]) -> Dict:
        """Compare virtual channels in parallel using a process pool."""
        # One process per channel, capped at the CPU count.
        num_processes = min(cpu_count(), len(all_vc_ids))

        # Bind the shared compare_range; the bound method pickles along with
        # self into the worker processes.
        compare_func = partial(self._compare_single_virtual_channel_worker,
                              compare_range=compare_range)

        # One task per channel; a channel absent from a file maps to None.
        tasks = []
        for vc_id in all_vc_ids:
            group1 = vc_groups1.get_group(vc_id) if vc_id in vc_groups1.groups else None
            group2 = vc_groups2.get_group(vc_id) if vc_id in vc_groups2.groups else None
            tasks.append((vc_id, group1, group2))

        # Fan out to the pool and collect (vc_id, result) pairs.
        results = {}
        with Pool(processes=num_processes) as pool:
            for i, (vc_id, result) in enumerate(pool.starmap(compare_func, tasks)):
                results[vc_id] = result
                if progress_callback:
                    # Progress spans 30..90 across the channels.
                    progress = 30 + int((i + 1) / len(all_vc_ids) * 60)
                    progress_callback(progress, f"处理虚拟信道 {vc_id}",
                                    f"已完成 {i + 1}/{len(all_vc_ids)}")

        return results

    def _compare_virtual_channels_sequential(self,
                                           vc_groups1,
                                           vc_groups2,
                                           all_vc_ids: List[int],
                                           compare_range: Optional[Tuple[int, int]],
                                           progress_callback: Optional[Callable]) -> Dict:
        """Compare virtual channels one at a time in this process."""
        results = {}

        for i, vc_id in enumerate(all_vc_ids):
            group1 = vc_groups1.get_group(vc_id) if vc_id in vc_groups1.groups else None
            group2 = vc_groups2.get_group(vc_id) if vc_id in vc_groups2.groups else None

            # The worker echoes the vc_id back; we already have it, so
            # discard the echoed copy instead of shadowing the loop variable.
            _, result = self._compare_single_virtual_channel_worker(
                vc_id, group1, group2, compare_range
            )

            results[vc_id] = result

            if progress_callback:
                # Progress spans 30..90 across the channels.
                progress = 30 + int((i + 1) / len(all_vc_ids) * 60)
                progress_callback(progress, f"处理虚拟信道 {vc_id}",
                                f"已完成 {i + 1}/{len(all_vc_ids)}")

        return results

    def _compare_single_virtual_channel_worker(self,
                                             vc_id: int,
                                             group1: Optional[pd.DataFrame],
                                             group2: Optional[pd.DataFrame],
                                             compare_range: Optional[Tuple[int, int]]) -> Tuple[int, Dict]:
        """Compare one virtual channel (also used as the multiprocessing worker).

        Args:
            vc_id: virtual-channel id being compared
            group1: base-file rows for this channel, or None if absent
            group2: target-file rows for this channel, or None if absent
            compare_range: accepted for interface symmetry; currently unused

        Returns:
            (vc_id, result-dict) so parallel callers can reassociate results.
        """
        try:
            vc_result = {
                'vc_id': vc_id,
                'frames_in_base': len(group1) if group1 is not None else 0,
                'frames_in_target': len(group2) if group2 is not None else 0,
                'matching_frames': 0,
                'mismatching_frames': 0,
                'missing_in_target': 0,
                'missing_in_base': 0,
                'frame_comparisons': []
            }

            # Channel absent from both files: nothing to compare.
            if group1 is None and group2 is None:
                return vc_id, vc_result

            if group1 is None:
                # Channel only exists in the target file.
                vc_result['missing_in_base'] = len(group2)
                for _, row in group2.iterrows():
                    vc_result['frame_comparisons'].append({
                        'frame_count': row.name[1],  # frame_count is the second index level
                        'type': 'missing_in_base',
                        'vc_id': vc_id
                    })
                return vc_id, vc_result

            if group2 is None:
                # Channel only exists in the base file.
                vc_result['missing_in_target'] = len(group1)
                for _, row in group1.iterrows():
                    vc_result['frame_comparisons'].append({
                        'frame_count': row.name[1],  # frame_count is the second index level
                        'type': 'missing_in_target',
                        'vc_id': vc_id
                    })
                return vc_id, vc_result

            # Frame counts present on each side.
            counts1 = group1.reset_index()['frame_count'].values
            counts2 = group2.reset_index()['frame_count'].values

            # Set arithmetic in NumPy: shared counts and one-sided misses.
            common_counts = np.intersect1d(counts1, counts2)
            missing_in_target = np.setdiff1d(counts1, counts2)
            missing_in_base = np.setdiff1d(counts2, counts1)

            # Record frames the target file is missing.
            for count in missing_in_target:
                vc_result['missing_in_target'] += 1
                vc_result['frame_comparisons'].append({
                    'frame_count': int(count),
                    'type': 'missing_in_target',
                    'vc_id': vc_id
                })

            # Record frames the base file is missing.
            for count in missing_in_base:
                vc_result['missing_in_base'] += 1
                vc_result['frame_comparisons'].append({
                    'frame_count': int(count),
                    'type': 'missing_in_base',
                    'vc_id': vc_id
                })

            # Bulk-compare the frames both sides share.
            if len(common_counts) > 0:
                # Select the shared rows in common_counts order on both sides
                # so the hash arrays align element-wise.
                # NOTE(review): assumes frame_count values are unique within a
                # virtual channel — duplicates would misalign the arrays; confirm
                # against the frame producer.
                group1_common = group1.loc[(vc_id, common_counts), :]
                group2_common = group2.loc[(vc_id, common_counts), :]

                # Vectorized payload-hash comparison.
                hash_match = (group1_common['payload_hash'].values ==
                             group2_common['payload_hash'].values)

                # Cast to plain int so numpy scalar types don't leak into the
                # result dict (e.g. breaking JSON serialization downstream).
                vc_result['matching_frames'] = int(np.sum(hash_match))
                vc_result['mismatching_frames'] = int(np.sum(~hash_match))

                # Per-frame records.
                for i, count in enumerate(common_counts):
                    if hash_match[i]:
                        vc_result['frame_comparisons'].append({
                            'frame_count': int(count),
                            'type': 'matching',
                            'vc_id': vc_id
                        })
                    else:
                        # Mismatched frames could be diffed field-by-field; kept
                        # simplified here with an empty differences payload.
                        vc_result['frame_comparisons'].append({
                            'frame_count': int(count),
                            'type': 'content_mismatch',
                            'vc_id': vc_id,
                            'differences': [{'field': 'payload', 'differences': []}]
                        })

            return vc_id, vc_result

        except Exception as e:
            self.logger.error(f"比对虚拟信道 {vc_id} 失败: {str(e)}")
            # Return a well-formed error result so the merge step never breaks.
            return vc_id, {
                'vc_id': vc_id,
                'error': str(e),
                'frames_in_base': 0,
                'frames_in_target': 0,
                'matching_frames': 0,
                'mismatching_frames': 0,
                'missing_in_target': 0,
                'missing_in_base': 0,
                'frame_comparisons': []
            }


class StreamingComparer:
    """Streaming comparer for very large files.

    Reads and compares frames in fixed-size chunks to bound memory usage.

    NOTE(review): the comparison is chunk-local — a frame_count appearing in
    different chunks of the two inputs is reported as missing rather than
    matched. This is only accurate when both inputs are chunk-aligned;
    confirm against the frame sources.
    """

    def __init__(self, chunk_size: int = 10000):
        # Number of frames buffered per input before a chunk comparison runs.
        self.chunk_size = chunk_size
        self.logger = logging.getLogger('frame_analyzer')

    def compare_large_files_streaming(self,
                                    frames_generator1,
                                    frames_generator2,
                                    total_frames1: int,
                                    total_frames2: int,
                                    compare_range: Optional[Tuple[int, int]] = None,
                                    progress_callback: Optional[Callable] = None) -> Dict:
        """Compare two large files by streaming frames in chunks.

        Args:
            frames_generator1: frame generator of the base file
            frames_generator2: frame generator of the target file
            total_frames1: total frame count of the base file (for progress)
            total_frames2: total frame count of the target file (for progress)
            compare_range: optional range forwarded to the chunk comparer
            progress_callback: optional callback(percent, stage, detail)

        Returns:
            Merged comparison result dict (same shape as
            HighPerformanceComparer.compare_files_vectorized).

        Raises:
            Exception: any failure is logged and re-raised.
        """
        # Local import keeps the module's import surface unchanged.
        from itertools import zip_longest

        try:
            start_time = time.time()

            # Overall result accumulated across chunks.
            comparison_result = {
                'total_frames': [total_frames1, total_frames2],
                'frame_results': [],
                'matching_frames': 0,
                'virtual_channel_results': {},
                'processing_time': 0
            }

            # Per-virtual-channel accumulators, merged across chunks.
            vc_stats = {}

            # Chunk comparisons are delegated to the vectorized comparer.
            hp_comparer = HighPerformanceComparer()

            chunk_count = 0
            frames_buffer1 = []
            frames_buffer2 = []

            # zip_longest (not zip) so the longer input's tail frames are not
            # silently dropped when the files have different lengths.
            for frame1, frame2 in zip_longest(frames_generator1, frames_generator2,
                                              fillvalue=None):
                if frame1 is not None:
                    frames_buffer1.append(frame1)
                if frame2 is not None:
                    frames_buffer2.append(frame2)

                # Compare once either buffer reaches the chunk size.
                if len(frames_buffer1) >= self.chunk_size or len(frames_buffer2) >= self.chunk_size:
                    # Multiprocessing is disabled per chunk: chunks are small
                    # and the streaming path is meant to minimize memory.
                    chunk_result = hp_comparer.compare_files_vectorized(
                        frames_buffer1, frames_buffer2, compare_range,
                        progress_callback=None, use_multiprocessing=False
                    )

                    # Fold the chunk result into the running totals.
                    self._merge_chunk_results(comparison_result, chunk_result, vc_stats)

                    frames_buffer1.clear()
                    frames_buffer2.clear()
                    chunk_count += 1

                    if progress_callback:
                        # Clamp at 100: the estimate can overshoot on the final
                        # chunks; guard the denominator against zero totals.
                        progress = min(100, int(chunk_count * self.chunk_size /
                                                max(total_frames1, total_frames2, 1) * 100))
                        progress_callback(progress, f"处理数据块 {chunk_count}",
                                        f"已处理约 {chunk_count * self.chunk_size} 帧")

                    # Keep peak memory down between chunks.
                    gc.collect()

            # Compare whatever is left in the buffers.
            if frames_buffer1 or frames_buffer2:
                chunk_result = hp_comparer.compare_files_vectorized(
                    frames_buffer1, frames_buffer2, compare_range,
                    progress_callback=None, use_multiprocessing=False
                )
                self._merge_chunk_results(comparison_result, chunk_result, vc_stats)

            # Publish the accumulated per-channel stats.
            comparison_result['virtual_channel_results'] = vc_stats

            comparison_result['processing_time'] = time.time() - start_time

            if progress_callback:
                progress_callback(100, "流式比对完成",
                                f"总耗时: {comparison_result['processing_time']:.2f}秒")

            self.logger.info(f"流式比对完成，耗时: {comparison_result['processing_time']:.2f}秒")

            return comparison_result

        except Exception as e:
            self.logger.error(f"流式比对失败: {str(e)}")
            raise

    def _merge_chunk_results(self, total_result: Dict, chunk_result: Dict, vc_stats: Dict):
        """Fold one chunk's comparison result into the running totals.

        Mutates total_result and vc_stats in place.
        """
        # Overall counters.
        total_result['matching_frames'] += chunk_result['matching_frames']
        total_result['frame_results'].extend(chunk_result['frame_results'])

        # Per-virtual-channel counters.
        for vc_id, vc_result in chunk_result['virtual_channel_results'].items():
            if vc_id not in vc_stats:
                # First time this channel is seen: start a zeroed accumulator.
                vc_stats[vc_id] = {
                    'vc_id': vc_id,
                    'frames_in_base': 0,
                    'frames_in_target': 0,
                    'matching_frames': 0,
                    'mismatching_frames': 0,
                    'missing_in_target': 0,
                    'missing_in_base': 0,
                    'frame_comparisons': []
                }

            # Accumulate the numeric counters.
            for key in ['frames_in_base', 'frames_in_target', 'matching_frames',
                       'mismatching_frames', 'missing_in_target', 'missing_in_base']:
                vc_stats[vc_id][key] += vc_result[key]

            # Append this chunk's per-frame records.
            vc_stats[vc_id]['frame_comparisons'].extend(vc_result['frame_comparisons'])