"""
高效帧数据转换器 - 使用NumPy向量化操作
专为处理数百MB至GB级DAT文件设计
"""

import os
import time
import logging
import numpy as np
from typing import Optional, Callable, Tuple
from logger import logger


class FastFrameConverter:
    """High-performance frame data converter.

    Replaces per-frame Python loops with NumPy vectorised slicing so that
    DAT files in the hundreds-of-MB to GB range convert quickly.
    """

    # Frame format constants
    FRAME_SIZE_1092 = 1092  # full frame size in bytes
    FRAME_SIZE_1024 = 1024  # CCSDS standard frame size

    # Data-area offset and sizes within each 1092-byte frame
    DATA_OFFSET = 64      # start offset of the data area
    F896_SIZE = 896       # size of the f896 data area
    F128_SIZE = 128       # size of the f128 data area
    TOTAL_DATA_SIZE = F896_SIZE + F128_SIZE  # 1024 bytes

    def __init__(self):
        """Initialise the converter and its logger."""
        self.logger = logging.getLogger('fast_frame_converter')
        self.logger.info("FastFrameConverter 初始化完成")

    def convert_frames_vectorized(self,
                                 source_path: str,
                                 target_path: str,
                                 extract_mode: str = '1024',
                                 custom_range: Optional[Tuple[int, int]] = None,
                                 progress_callback: Optional[Callable[[int, str, str], None]] = None,
                                 chunk_size_mb: int = 100) -> dict:
        """Convert frame data efficiently using NumPy vectorised operations.

        Args:
            source_path: Path to the source DAT file.
            target_path: Path the extracted data is written to.
            extract_mode: Extraction mode ('1024', '896', 'custom').
            custom_range: Inclusive (start_byte, end_byte) within one frame;
                used only when extract_mode == 'custom'.
            progress_callback: Optional callback(percent, stage, detail).
            chunk_size_mb: Chunk size in MB used to bound memory usage.

        Returns:
            dict: Conversion statistics (frame counts, timing, throughput).

        Raises:
            FileNotFoundError: If source_path does not exist.
            ValueError: If the source size is not a whole number of frames,
                or the extraction mode / custom range is invalid.
        """
        start_time = time.time()

        try:
            # Validate input parameters
            if not os.path.exists(source_path):
                raise FileNotFoundError(f"源文件不存在: {source_path}")

            # The file must contain a whole number of 1092-byte frames
            file_size = os.path.getsize(source_path)
            if file_size % self.FRAME_SIZE_1092 != 0:
                raise ValueError(f"源文件大小({file_size}字节)不是{self.FRAME_SIZE_1092}的整数倍")

            total_frames = file_size // self.FRAME_SIZE_1092
            self.logger.info(f"开始转换: {source_path} -> {target_path}")
            self.logger.info(f"文件大小: {file_size:,} 字节 ({file_size / (1024**3):.3f} GB)")
            self.logger.info(f"总帧数: {total_frames:,}")

            if progress_callback:
                progress_callback(5, "分析文件", f"文件大小: {file_size / (1024**2):.1f} MB, 总帧数: {total_frames:,}")

            # Resolve the per-frame byte range to extract
            start_byte, end_byte, output_size = self._get_extract_params(extract_mode, custom_range)

            self.logger.info(f"提取模式: {extract_mode}")
            self.logger.info(f"提取范围: 字节 {start_byte} 到 {end_byte-1} (大小: {output_size})")

            # Pick a chunk size that balances memory use against throughput
            optimal_chunk_size = self._calculate_optimal_chunk_size(file_size, chunk_size_mb)
            frames_per_chunk = optimal_chunk_size // self.FRAME_SIZE_1092

            self.logger.info(f"使用块大小: {optimal_chunk_size / (1024**2):.1f} MB ({frames_per_chunk:,} 帧/块)")

            if progress_callback:
                progress_callback(10, "开始转换", f"块大小: {frames_per_chunk:,} 帧/块")

            # Run the vectorised conversion loop
            result = self._perform_vectorized_conversion(
                source_path, target_path, total_frames, frames_per_chunk,
                start_byte, end_byte, output_size, progress_callback
            )

            # Timing / throughput. Fix: clamp elapsed time so a tiny file on
            # a coarse clock cannot trigger a ZeroDivisionError.
            elapsed_time = time.time() - start_time
            throughput_mbps = (file_size / (1024**2)) / max(elapsed_time, 1e-9)

            result.update({
                'elapsed_time': elapsed_time,
                'throughput_mbps': throughput_mbps,
                'source_size_mb': file_size / (1024**2),
                'target_size_mb': (total_frames * output_size) / (1024**2)
            })

            self.logger.info(f"转换完成: 耗时 {elapsed_time:.2f}s, 吞吐量 {throughput_mbps:.1f} MB/s")

            if progress_callback:
                progress_callback(100, "转换完成",
                                f"耗时 {elapsed_time:.1f}s, 吞吐量 {throughput_mbps:.1f} MB/s")

            return result

        except Exception as e:
            # Log and report, then re-raise so callers see the original error
            self.logger.error(f"转换失败: {str(e)}")
            if progress_callback:
                progress_callback(0, "转换失败", str(e))
            raise

    def _get_extract_params(self, extract_mode: str, custom_range: Optional[Tuple[int, int]]) -> Tuple[int, int, int]:
        """Resolve the extraction parameters for a given mode.

        Returns:
            Tuple[start_byte, end_byte, output_size] where end_byte is the
            exclusive slice bound.

        Raises:
            ValueError: For an unknown mode or an out-of-frame custom range.
        """
        if extract_mode == '1024':
            # f896 + f128 data area (1024 bytes)
            return (self.DATA_OFFSET, self.DATA_OFFSET + self.TOTAL_DATA_SIZE, self.TOTAL_DATA_SIZE)
        elif extract_mode == '896':
            # f896 data area only (896 bytes)
            return (self.DATA_OFFSET, self.DATA_OFFSET + self.F896_SIZE, self.F896_SIZE)
        elif extract_mode == 'custom' and custom_range:
            start_byte, end_byte = custom_range
            # Fix: previously an out-of-frame range was silently truncated by
            # NumPy slicing, producing a target file smaller than the
            # reported output_size. Reject it explicitly instead.
            if not (0 <= start_byte <= end_byte < self.FRAME_SIZE_1092):
                raise ValueError(f"无效的提取模式或自定义范围: {extract_mode}, {custom_range}")
            output_size = end_byte - start_byte + 1
            return (start_byte, end_byte + 1, output_size)  # +1 because end_byte is inclusive
        else:
            raise ValueError(f"无效的提取模式或自定义范围: {extract_mode}, {custom_range}")

    def _calculate_optimal_chunk_size(self, file_size: int, chunk_size_mb: int) -> int:
        """Compute the chunk size (bytes) used by the conversion loop.

        Balances memory limits against throughput; always a whole number of
        frames and never less than one frame.
        """
        # Requested chunk size in bytes
        base_chunk_size = chunk_size_mb * 1024 * 1024

        # Whole frames that fit in the requested chunk
        frames_per_chunk = base_chunk_size // self.FRAME_SIZE_1092

        # For small files, shrink the chunk to the file itself
        if file_size < base_chunk_size:
            frames_per_chunk = file_size // self.FRAME_SIZE_1092

        # Cap the chunk at 500MB to avoid memory pressure
        max_frames_per_chunk = (500 * 1024 * 1024) // self.FRAME_SIZE_1092
        if frames_per_chunk > max_frames_per_chunk:
            frames_per_chunk = max_frames_per_chunk

        # Fix: clamp AFTER the small-file override so an empty/tiny file can
        # no longer yield a zero chunk size (which made the progress-logging
        # modulo a latent ZeroDivisionError).
        if frames_per_chunk < 1:
            frames_per_chunk = 1

        return frames_per_chunk * self.FRAME_SIZE_1092

    def _perform_vectorized_conversion(self, source_path: str, target_path: str,
                                     total_frames: int, frames_per_chunk: int,
                                     start_byte: int, end_byte: int, output_size: int,
                                     progress_callback: Optional[Callable[[int, str, str], None]] = None) -> dict:
        """Core chunked conversion loop.

        Reads whole chunks of frames, slices the [start_byte:end_byte) range
        out of every frame with one vectorised NumPy operation, and streams
        the result to the target file.
        """
        processed_frames = 0

        with open(source_path, 'rb') as src_file, open(target_path, 'wb') as tgt_file:

            while processed_frames < total_frames:
                # Frames remaining in this chunk
                current_chunk_frames = min(frames_per_chunk, total_frames - processed_frames)
                chunk_size_bytes = current_chunk_frames * self.FRAME_SIZE_1092

                # Progress scales from 10% to 95% across the data
                if progress_callback:
                    progress = int(10 + (processed_frames / total_frames) * 85)
                    progress_callback(progress, "处理数据块",
                                    f"帧 {processed_frames:,} - {processed_frames + current_chunk_frames:,}")

                raw = src_file.read(chunk_size_bytes)
                if not raw:
                    break

                chunk_data = np.frombuffer(raw, dtype=np.uint8)

                # Fix: only reshape whole frames, so a short read (e.g. the
                # file truncated mid-conversion) cannot raise an opaque
                # reshape ValueError.
                whole_frames = len(chunk_data) // self.FRAME_SIZE_1092
                if whole_frames == 0:
                    break
                chunk_frames = chunk_data[:whole_frames * self.FRAME_SIZE_1092].reshape(
                    whole_frames, self.FRAME_SIZE_1092)

                # Vectorised extraction of the byte range — the key speedup
                extracted_data = chunk_frames[:, start_byte:end_byte]

                tgt_file.write(extracted_data.tobytes())

                processed_frames += whole_frames

                # Periodic progress logging (every 5 chunks, and at the end)
                if processed_frames % (frames_per_chunk * 5) == 0 or processed_frames == total_frames:
                    self.logger.info(f"已处理 {processed_frames:,}/{total_frames:,} 帧 "
                                   f"({processed_frames/total_frames*100:.1f}%)")

        return {
            'total_frames': total_frames,
            'processed_frames': processed_frames,
            'output_size_per_frame': output_size,
            'success': processed_frames == total_frames
        }

    def estimate_performance(self, file_size: int, extract_mode: str = '1024') -> dict:
        """Estimate conversion performance for a file of the given size.

        Args:
            file_size: File size in bytes (must be a whole number of frames).
            extract_mode: Extraction mode.

        Returns:
            dict: Estimated time and throughput figures.

        Raises:
            ValueError: If file_size is not a multiple of the frame size.
        """
        if file_size % self.FRAME_SIZE_1092 != 0:
            raise ValueError(f"文件大小不是{self.FRAME_SIZE_1092}的整数倍")

        total_frames = file_size // self.FRAME_SIZE_1092

        # Empirical baseline (typical modern SSD + CPU); hardware-dependent
        base_throughput_mbps = 200  # baseline 200 MB/s

        # Adjust for the extraction mode
        if extract_mode == '896':
            throughput_factor = 0.9  # 896-byte mode slightly slower
        elif extract_mode == '1024':
            throughput_factor = 1.0  # 1024-byte mode is the baseline
        else:
            throughput_factor = 0.8  # custom mode is slowest

        estimated_throughput = base_throughput_mbps * throughput_factor
        estimated_time = (file_size / (1024**2)) / estimated_throughput

        return {
            'file_size_mb': file_size / (1024**2),
            'file_size_gb': file_size / (1024**3),
            'total_frames': total_frames,
            'estimated_time_seconds': estimated_time,
            'estimated_throughput_mbps': estimated_throughput,
            'extract_mode': extract_mode
        }

    def verify_conversion(self, source_path: str, target_path: str,
                         extract_mode: str = '1024',
                         custom_range: Optional[Tuple[int, int]] = None,
                         sample_frames: int = 100) -> dict:
        """Verify a conversion by sampling frames and comparing byte ranges.

        Args:
            source_path: Source file path.
            target_path: Target file path.
            extract_mode: Extraction mode used for the conversion.
            custom_range: Custom range used for the conversion, if any.
            sample_frames: Number of frames to sample for verification.

        Returns:
            dict: Verification result; 'valid' is True only when the target
            size matches and every sampled frame compares equal.
        """
        try:
            # Resolve the extraction parameters used by the conversion
            start_byte, end_byte, output_size = self._get_extract_params(extract_mode, custom_range)

            # File sizes
            source_size = os.path.getsize(source_path)
            target_size = os.path.getsize(target_path)

            total_frames = source_size // self.FRAME_SIZE_1092
            expected_target_size = total_frames * output_size

            if target_size != expected_target_size:
                return {
                    'valid': False,
                    'error': f"目标文件大小不匹配: 期望 {expected_target_size}, 实际 {target_size}"
                }

            verification_frames = min(sample_frames, total_frames)

            # Fix: an empty source (0 frames) previously caused a
            # ZeroDivisionError when computing the success rate.
            if verification_frames == 0:
                return {
                    'valid': True,
                    'total_frames': total_frames,
                    'verified_frames': 0,
                    'errors': 0,
                    'success_rate': 100.0,
                    'source_size': source_size,
                    'target_size': target_size,
                    'expected_target_size': expected_target_size
                }

            # Evenly-spaced sample of frame indices across the file
            sample_indices = np.linspace(0, total_frames - 1, verification_frames, dtype=int)

            errors = 0

            with open(source_path, 'rb') as src_file, open(target_path, 'rb') as tgt_file:
                for frame_idx in sample_indices:
                    # Read the full frame from the source
                    src_file.seek(frame_idx * self.FRAME_SIZE_1092)
                    source_frame = src_file.read(self.FRAME_SIZE_1092)

                    # Read the corresponding extracted data from the target
                    tgt_file.seek(frame_idx * output_size)
                    target_data = tgt_file.read(output_size)

                    # The slice of the source frame that should have been kept
                    expected_data = source_frame[start_byte:end_byte]

                    if target_data != expected_data:
                        errors += 1
                        self.logger.warning(f"帧 {frame_idx} 验证失败")

            success_rate = (verification_frames - errors) / verification_frames * 100

            return {
                'valid': errors == 0,
                'total_frames': total_frames,
                'verified_frames': verification_frames,
                'errors': errors,
                'success_rate': success_rate,
                'source_size': source_size,
                'target_size': target_size,
                'expected_target_size': expected_target_size
            }

        except Exception as e:
            # Verification is best-effort; report the failure, never raise
            return {
                'valid': False,
                'error': f"验证过程中出错: {str(e)}"
            }


def benchmark_conversion_methods():
    """Benchmark the vectorised conversion path on synthetic data.

    Builds a temporary DAT file of random frames, converts it in '1024'
    mode, prints throughput figures, and returns the timing stats.
    Temporary files are always removed, even if conversion fails.
    """
    import tempfile

    # 10k frames of synthetic test data (~10.9 MB)
    test_frames = 10000
    test_data_size = test_frames * 1092

    with tempfile.NamedTemporaryFile(delete=False, suffix='.dat') as temp_source:
        # Fix: os.urandom produces the whole payload in one C call instead of
        # ~11 million Python-level random.randint calls.
        temp_source.write(os.urandom(test_data_size))
        temp_source_path = temp_source.name

    try:
        converter = FastFrameConverter()

        # Reserve a target path for the conversion output
        with tempfile.NamedTemporaryFile(delete=False, suffix='.dat') as temp_target:
            temp_target_path = temp_target.name

        try:
            start_time = time.time()
            converter.convert_frames_vectorized(
                temp_source_path, temp_target_path, '1024'
            )
            vectorized_time = time.time() - start_time

            print(f"向量化方法:")
            print(f"  处理帧数: {test_frames:,}")
            print(f"  数据大小: {test_data_size / (1024**2):.2f} MB")
            print(f"  处理时间: {vectorized_time:.3f} 秒")
            print(f"  吞吐量: {(test_data_size / (1024**2)) / vectorized_time:.1f} MB/s")
        finally:
            # Fix: the target file previously leaked if conversion raised
            os.unlink(temp_target_path)

        return {
            'frames': test_frames,
            'data_size_mb': test_data_size / (1024**2),
            'vectorized_time': vectorized_time,
            'vectorized_throughput': (test_data_size / (1024**2)) / vectorized_time
        }

    finally:
        os.unlink(temp_source_path)


if __name__ == "__main__":
    # Script entry point: run the conversion benchmark and print a banner.
    print("FastFrameConverter 性能基准测试")
    print("=" * 50)
    benchmark_conversion_methods()
    print("\n基准测试完成!")