"""
优化的文件处理器 - 防止闪退并提高性能
主要改进：
1. 智能内存管理 - 根据可用内存动态调整处理策略
2. 增强异常处理 - 捕获并处理内存不足等异常
3. 流式处理 - 避免一次性加载大文件
4. 使用pandas/numpy优化性能
"""

import gc
import logging
import mmap
import os
import warnings
from typing import Callable, Dict, Generator, List, Optional, Tuple, Union

import numpy as np
import pandas as pd
import psutil

warnings.filterwarnings('ignore', category=pd.errors.PerformanceWarning)


class OptimizedFileProcessor:
    """Optimized processor for large fixed-frame DAT files.

    Streams a file in memory-aware chunks — memory-mapped for files over
    500MB, buffered reads otherwise — and parses each chunk into per-frame
    dictionaries for the three supported layouts (1092/1024/896 bytes).
    """

    # Default chunk size (50MB — a deliberately conservative setting)
    DEFAULT_CHUNK_SIZE = 50 * 1024 * 1024

    # Memory-usage threshold (keep at least 20% of system memory free)
    MEMORY_THRESHOLD = 0.8

    # Supported frame layouts: format name -> frame size in bytes
    FRAME_SIZES = {
        '1092': 1092,
        '1024': 1024,
        '896': 896
    }

    def __init__(self):
        self.logger = logging.getLogger('optimized_processor')
        self._setup_logging()

    def _setup_logging(self):
        """Attach a stream handler exactly once.

        The handler guard prevents duplicate log lines when multiple
        processor instances are created in one process.
        """
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
            self.logger.setLevel(logging.INFO)

    def get_available_memory(self) -> int:
        """Return usable memory in bytes: 80% of currently-available RAM.

        Falls back to a conservative 500MB when psutil cannot report
        system memory.
        """
        try:
            memory = psutil.virtual_memory()
            # Keep 20% headroom so the OS and other processes stay responsive
            return int(memory.available * 0.8)
        except Exception:
            # FIX: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            return 500 * 1024 * 1024

    def detect_frame_format(self, file_path: str) -> Tuple[str, int]:
        """Guess the frame format from the file size.

        A file of N whole frames has a size evenly divisible by the frame
        size; the first matching layout (in FRAME_SIZES order) wins.

        Returns:
            (format_name, frame_size); defaults to the 1092-byte layout
            when no known size divides the file evenly.
        """
        file_size = os.path.getsize(file_path)

        for format_name, frame_size in self.FRAME_SIZES.items():
            if file_size % frame_size == 0:
                self.logger.info(f"检测到{format_name}字节帧格式")
                return format_name, frame_size

        self.logger.warning("无法确定帧格式，默认使用1092字节")
        return '1092', 1092

    def calculate_optimal_chunk_size(self, file_size: int, frame_size: int) -> int:
        """Pick a chunk size (always a multiple of frame_size).

        Sized from one quarter of available memory, clamped to the range
        of 100..10000 frames per chunk.
        """
        available_memory = self.get_available_memory()

        # Chunk size must be an integer number of frames
        max_chunk_size = min(available_memory // 4, self.DEFAULT_CHUNK_SIZE)
        chunk_frames = max_chunk_size // frame_size

        # At least 100 frames per chunk, at most 10000 (avoid huge batches)
        chunk_frames = min(10000, max(100, chunk_frames))

        chunk_size = chunk_frames * frame_size

        self.logger.info(f"优化块大小: {chunk_size / (1024*1024):.2f}MB ({chunk_frames}帧)")
        return chunk_size

    def read_file_streaming(self, file_path: str,
                           frame_format: Optional[str] = None,
                           progress_callback: Optional[Callable] = None) -> Generator[List[Dict], None, None]:
        """Stream a file as a generator of parsed-frame chunks.

        Args:
            file_path: path to the DAT file.
            frame_format: one of '1092'/'1024'/'896'; auto-detected when None.
            progress_callback: optional fn(progress_pct, status, detail).

        Yields:
            Lists of frame dicts, one list per processed chunk.

        Raises:
            Exception: on MemoryError a user-friendly message is raised
                (with the original chained); other errors are re-raised.
        """
        try:
            # Resolve the frame format and size
            if frame_format is None:
                frame_format, frame_size = self.detect_frame_format(file_path)
            else:
                frame_size = self.FRAME_SIZES.get(frame_format, 1092)

            file_size = os.path.getsize(file_path)
            total_frames = file_size // frame_size

            # Size the chunk to fit comfortably in memory
            chunk_size = self.calculate_optimal_chunk_size(file_size, frame_size)
            chunk_frames = chunk_size // frame_size

            self.logger.info(f"开始流式读取: {file_path}")
            self.logger.info(f"文件大小: {file_size/(1024**3):.2f}GB, 总帧数: {total_frames}")

            with open(file_path, 'rb') as f:
                # Memory-map very large files; read small ones directly
                if file_size > 500 * 1024 * 1024:  # 500MB
                    with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
                        yield from self._process_mmap_chunks(
                            mm, frame_format, frame_size, chunk_frames,
                            total_frames, progress_callback
                        )
                else:
                    yield from self._process_file_chunks(
                        f, frame_format, frame_size, chunk_frames,
                        total_frames, progress_callback
                    )

        except MemoryError as e:
            self.logger.error(f"内存不足: {e}")
            # Best-effort reclamation before surfacing the error
            gc.collect()
            # FIX: chain the original MemoryError instead of discarding it
            raise Exception("内存不足，请关闭其他程序后重试") from e
        except Exception as e:
            self.logger.error(f"读取文件失败: {e}")
            raise

    def _process_mmap_chunks(self, mm, frame_format, frame_size, chunk_frames,
                           total_frames, progress_callback) -> Generator[List[Dict], None, None]:
        """Yield parsed-frame chunks from a memory-mapped file."""
        processed_frames = 0

        while processed_frames < total_frames:
            # Reclaim memory proactively when system usage runs high
            if psutil.virtual_memory().percent > 85:
                self.logger.warning("内存使用率过高，执行垃圾回收")
                gc.collect()

            # Number of frames in this chunk (last chunk may be short)
            frames_to_process = min(chunk_frames, total_frames - processed_frames)
            start_offset = processed_frames * frame_size
            end_offset = start_offset + frames_to_process * frame_size

            # Slice the mapped region for this chunk
            chunk_data = mm[start_offset:end_offset]

            frames = self._parse_chunk_data(chunk_data, frame_format, frame_size)

            processed_frames += frames_to_process
            if progress_callback:
                progress = int(processed_frames / total_frames * 100)
                progress_callback(
                    progress,
                    f"已处理 {processed_frames}/{total_frames} 帧",
                    f"内存使用: {psutil.virtual_memory().percent:.1f}%"
                )

            yield frames

            # Drop the chunk reference before the next iteration allocates
            del chunk_data

    def _process_file_chunks(self, f, frame_format, frame_size, chunk_frames,
                           total_frames, progress_callback) -> Generator[List[Dict], None, None]:
        """Yield parsed-frame chunks from an open binary file object."""
        processed_frames = 0

        while processed_frames < total_frames:
            # Number of frames in this chunk (last chunk may be short)
            frames_to_process = min(chunk_frames, total_frames - processed_frames)

            chunk_data = f.read(frames_to_process * frame_size)
            if not chunk_data:
                # Premature EOF — stop rather than loop forever
                break

            frames = self._parse_chunk_data(chunk_data, frame_format, frame_size)

            processed_frames += frames_to_process
            if progress_callback:
                progress = int(processed_frames / total_frames * 100)
                progress_callback(
                    progress,
                    f"已处理 {processed_frames}/{total_frames} 帧",
                    f"内存使用: {psutil.virtual_memory().percent:.1f}%"
                )

            yield frames

            # Drop the chunk reference before the next iteration allocates
            del chunk_data

    def _parse_chunk_data(self, chunk_data: bytes, frame_format: str, frame_size: int) -> List[Dict]:
        """Dispatch a raw chunk to the parser for its frame format.

        Unknown formats yield an empty list (same as the original chain).
        """
        parsers = {
            '1092': self._parse_1092_frames,
            '1024': self._parse_1024_frames,
            '896': self._parse_896_frames,
        }
        parser = parsers.get(frame_format)
        return parser(chunk_data) if parser else []

    def _parse_1092_frames(self, data: bytes) -> List[Dict]:
        """Parse 1092-byte frames.

        Layout: 64-byte status header, an embedded 896-byte frame, 128
        bytes of RS check symbols, and a 4-byte trailer word.
        """
        import struct

        # 1092-byte frame layout (all header words are big-endian u32)
        dtype = np.dtype([
            ('sync_word', '>u4'),
            ('length', '>u4'),
            ('zero', '>u4'),
            ('t1', '>u4'),
            ('t2', '>u4'),
            ('frame_count', '>u4'),
            ('帧检验结果', '>u4'),
            ('帧同步状态', '>u4'),
            ('位滑状态', '>u4'),
            ('virtual_channel', '>u4'),
            ('frame_len', '>u4'),
            ('同步字长度', '>u4'),
            ('decode_status', '>u4'),
            ('同步字状态', '>u4'),
            ('f_001', '>u4', 2),
            ('f896', [
                ('sync', '>u4'),
                ('h1', '>u2'),
                ('h2', '>u4'),
                # FIX: was 'S886' — numpy S-dtype scalars strip trailing NUL
                # bytes on read-back, silently shortening the payload. 'V886'
                # preserves all 886 bytes verbatim.
                ('payload', 'V886')
            ]),
            ('f128', 'V128'),
            ('synn_end', '>u4'),
        ])

        # copy() creates a writable array so the VCID rewrite below works
        arr = np.frombuffer(data, dtype=dtype).copy()

        # VCID = low 6 bits of h1 (i.e. of the second header byte of the
        # embedded 896-byte frame) — matches the 1024-format parser
        arr['virtual_channel'] = arr['f896']['h1'] & 0x3F

        frames = []
        for item in arr:
            # Rebuild the 1024-byte payload, keeping big-endian byte order
            payload = (
                struct.pack('>I', int(item['f896']['sync'])) +      # bytes 64-67: frame sync header
                struct.pack('>H', int(item['f896']['h1'])) +        # bytes 68-69: version + spacecraft ID
                struct.pack('>I', int(item['f896']['h2'])) +        # bytes 70-73: VC frame counter
                item['f896']['payload'].tobytes() +                 # bytes 74-959: data
                item['f128'].tobytes()                              # bytes 960-1087: RS check symbols
                # FIX: .tobytes() guarantees plain bytes; concatenating raw
                # numpy void scalars onto bytes is not reliable.
            )

            frames.append({
                'sync_word': int(item['sync_word']),
                'frame_count': int(item['frame_count']),
                'virtual_channel': int(item['virtual_channel']),
                'decode_status': int(item['decode_status']),
                'payload': payload,
                'raw_frame': None,  # raw bytes are not retained when streaming
                'frame_format': '1092',
                'frame_len': 1092,
                'frame_sync_status': int(item['帧同步状态']),
                'bit_slip_status': int(item['位滑状态']),
                '帧检验结果': int(item['帧检验结果']),
                '同步字状态': int(item['同步字状态'])
            })

        return frames

    def _parse_1024_frames(self, data: bytes) -> List[Dict]:
        """Parse 1024-byte frames: 4-byte sync word then a CCSDS-style header."""
        frames = []
        num_frames = len(data) // 1024

        for i in range(num_frames):
            offset = i * 1024
            frame_data = data[offset:offset + 1024]

            # Key fields: sync word, VCID (low 6 bits of byte 5), and the
            # 3-byte big-endian virtual-channel frame counter at bytes 6-8
            sync_word = int.from_bytes(frame_data[0:4], 'big')
            vcid = frame_data[5] & 0x3F
            vc_count = int.from_bytes(frame_data[6:9], 'big')

            frames.append({
                'sync_word': sync_word,
                'frame_count': vc_count,
                'virtual_channel': vcid,
                'decode_status': 0,
                'payload': frame_data,
                'raw_frame': None,
                'frame_format': '1024',
                'frame_len': 1024,
                'frame_sync_status': 1,
                'bit_slip_status': 0,
                '帧检验结果': 0,
                '同步字状态': 0
            })

        return frames

    def _parse_896_frames(self, data: bytes) -> List[Dict]:
        """Parse 896-byte frames (same header fields as the 1024 layout)."""
        frames = []
        num_frames = len(data) // 896

        for i in range(num_frames):
            offset = i * 896
            frame_data = data[offset:offset + 896]

            # Guarded field extraction — tolerate a short trailing fragment
            sync_word = int.from_bytes(frame_data[:4], 'big') if len(frame_data) >= 4 else 0
            vcid = frame_data[5] & 0x3F if len(frame_data) > 5 else 0
            frame_count = int.from_bytes(frame_data[6:9], 'big') if len(frame_data) >= 9 else i

            frames.append({
                'sync_word': sync_word,
                'frame_count': frame_count,
                'virtual_channel': vcid,
                'decode_status': 0,
                'payload': frame_data,
                'raw_frame': None,
                'frame_format': '896',
                'frame_len': 896,
                'frame_sync_status': 1,
                'bit_slip_status': 0,
                '帧检验结果': 0,
                '同步字状态': 0
            })

        return frames

    def read_file_batch(self, file_path: str,
                       frame_format: Optional[str] = None,
                       progress_callback: Optional[Callable] = None) -> List[Dict]:
        """Read an entire file into one frame list.

        Intended for small files or callers that need all frames at once;
        aborts with MemoryError if system memory usage exceeds 90%.
        """
        all_frames = []

        try:
            # Reuse the streaming path and accumulate its chunks
            for chunk_frames in self.read_file_streaming(file_path, frame_format, progress_callback):
                all_frames.extend(chunk_frames)

                if psutil.virtual_memory().percent > 90:
                    self.logger.error("内存使用率超过90%，停止读取")
                    raise MemoryError("内存不足")

        except Exception as e:
            self.logger.error(f"批量读取失败: {e}")
            # Free whatever was accumulated before propagating
            del all_frames
            gc.collect()
            raise

        return all_frames

    def process_with_pandas(self, frames: List[Dict], operation: str = 'analyze') -> Union[Dict, pd.DataFrame, None]:
        """Process parsed frames with pandas.

        Args:
            frames: list of frame dicts as produced by the parsers.
            operation: 'analyze' returns a summary-statistics dict;
                'filter' returns a DataFrame of frames with
                decode_status == 0; any other value returns None.

        Raises:
            Exception: re-raised pandas errors after logging.
        """
        # FIX: return annotation was `-> pd.DataFrame`, but 'analyze'
        # returns a dict and unknown operations return None.
        try:
            df = pd.DataFrame(frames)

            if operation == 'analyze':
                stats = {
                    'total_frames': len(df),
                    'virtual_channels': df['virtual_channel'].value_counts().to_dict(),
                    'decode_status': df['decode_status'].value_counts().to_dict(),
                    'sync_errors': (df['frame_sync_status'] != 1).sum(),
                    'bit_slip_errors': (df['bit_slip_status'] != 0).sum()
                }
                return stats

            if operation == 'filter':
                return df[df['decode_status'] == 0]

            # FIX: make the unknown-operation fall-through explicit
            return None

        except Exception as e:
            self.logger.error(f"Pandas处理失败: {e}")
            raise

    def safe_cleanup(self):
        """Force a garbage-collection pass and log the resulting memory usage."""
        gc.collect()
        self.logger.info(f"内存清理完成，当前使用率: {psutil.virtual_memory().percent:.1f}%")


# 使用示例
# Usage example: stream a large DAT file chunk by chunk
if __name__ == "__main__":

    def _run_demo():
        """Demonstrate streaming reads with progress reporting."""
        processor = OptimizedFileProcessor()
        file_path = "test_data/large_file.dat"

        def progress_callback(progress, status, detail):
            print(f"[{progress}%] {status} - {detail}")

        try:
            # Each yielded chunk can be analyzed or persisted here
            for frames_chunk in processor.read_file_streaming(
                file_path, progress_callback=progress_callback
            ):
                print(f"处理 {len(frames_chunk)} 帧")
        except Exception as e:
            print(f"错误: {e}")
        finally:
            processor.safe_cleanup()

    _run_demo()