"""
性能测试脚本 - 对比传统方法与NumPy向量化方法的性能差异
用于验证FastFrameConverter的性能优势
"""

import os
import time
import tempfile
import random
import numpy as np
from fast_frame_converter import FastFrameConverter
from logger import logger


class PerformanceTestSuite:
    """Benchmark suite comparing the traditional per-frame conversion loop
    against the NumPy-vectorized path in ``FastFrameConverter``.
    """

    # Size of one raw frame in bytes: 64-byte header, 1024-byte data area
    # (bytes 64..1087), 4 trailing fill bytes (1088..1091).
    FRAME_SIZE = 1092

    def __init__(self):
        # Shared module-level logger instance.
        self.logger = logger

    def generate_test_data(self, num_frames: int) -> bytes:
        """Generate reproducible synthetic frame data for benchmarking.

        Each frame carries a fixed sync word (bytes 0..3), the frame length
        (bytes 4..7) and a running big-endian frame counter (bytes 20..23)
        in its otherwise-zero 64-byte header; bytes 64..1091 are
        pseudo-random payload.  Seeds are fixed so repeated runs produce
        identical data.

        Args:
            num_frames: Number of frames to generate.

        Returns:
            bytes: ``num_frames * 1092`` bytes of test data.
        """
        self.logger.info(f"生成 {num_frames:,} 帧测试数据...")

        # Fixed seeds keep runs reproducible.
        random.seed(42)
        np.random.seed(42)

        frame_size = self.FRAME_SIZE

        # Vectorized generation: one (num_frames, frame_size) uint8 matrix
        # instead of a Python loop over every byte, which dominates runtime
        # for large frame counts.
        frames = np.random.randint(0, 256, size=(num_frames, frame_size),
                                   dtype=np.uint8)

        # Header area is zero except for the fields set below.
        frames[:, :64] = 0
        frames[:, 0:4] = np.frombuffer((1234567890).to_bytes(4, 'big'),
                                       dtype=np.uint8)  # sync word
        frames[:, 4:8] = np.frombuffer(frame_size.to_bytes(4, 'big'),
                                       dtype=np.uint8)  # frame length
        # Big-endian 32-bit frame counter in bytes 20..23 of every frame.
        counters = np.arange(num_frames, dtype='>u4')
        frames[:, 20:24] = counters.view(np.uint8).reshape(num_frames, 4)

        return frames.tobytes()

    def traditional_convert(self, source_data: bytes, extract_mode: str = '1024') -> tuple:
        """Baseline conversion: extract the payload of each frame with a
        plain Python loop.  Intentionally naive — this is the slow method
        the vectorized path is benchmarked against.

        Args:
            source_data: Raw frame data (whole frames of 1092 bytes).
            extract_mode: '1024' extracts bytes 64..1087 per frame
                (f896 + f128); '896' extracts bytes 64..959 (f896 only).

        Returns:
            tuple: (converted bytes, elapsed seconds).

        Raises:
            ValueError: If ``extract_mode`` is not '1024' or '896'.
        """
        # perf_counter is the monotonic high-resolution clock intended for
        # interval timing (time.time can jump with wall-clock adjustments).
        start_time = time.perf_counter()

        frame_size = self.FRAME_SIZE
        frame_count = len(source_data) // frame_size

        if extract_mode == '1024':
            start_byte, end_byte = 64, 1088   # f896 + f128
        elif extract_mode == '896':
            start_byte, end_byte = 64, 960    # f896 only
        else:
            raise ValueError(f"不支持的提取模式: {extract_mode}")

        result_data = bytearray()

        # Frame-by-frame extraction — the traditional method under test.
        for i in range(frame_count):
            frame_start = i * frame_size
            result_data.extend(
                source_data[frame_start + start_byte:frame_start + end_byte]
            )

        elapsed_time = time.perf_counter() - start_time
        return bytes(result_data), elapsed_time

    def vectorized_convert(self, source_path: str, target_path: str, extract_mode: str = '1024') -> tuple:
        """Run the NumPy-vectorized conversion and time it.

        Args:
            source_path: Path of the raw input file.
            target_path: Path the converted output is written to.
            extract_mode: Extraction mode forwarded to the converter.

        Returns:
            tuple: (converter result dict, elapsed seconds).
        """
        converter = FastFrameConverter()

        start_time = time.perf_counter()
        result = converter.convert_frames_vectorized(
            source_path=source_path,
            target_path=target_path,
            extract_mode=extract_mode,
            chunk_size_mb=50  # small chunks keep the test's footprint modest
        )
        elapsed_time = time.perf_counter() - start_time

        return result, elapsed_time

    def run_performance_test(self, test_frames_list: list = None):
        """Run the end-to-end benchmark for several frame counts.

        For each count: generate data, run both conversion paths, verify
        the outputs match byte-for-byte, and record timing/throughput.

        Args:
            test_frames_list: Frame counts to benchmark; defaults to
                [1000, 5000, 10000, 50000].

        Returns:
            list: One result dict per frame count (see ``_print_summary``).
        """
        if test_frames_list is None:
            test_frames_list = [1000, 5000, 10000, 50000]

        print("=" * 80)
        print("帧数据转换性能测试")
        print("=" * 80)
        print("对比传统逐帧处理方法 vs NumPy向量化方法")
        print("")

        results = []

        for num_frames in test_frames_list:
            print(f"测试 {num_frames:,} 帧数据...")
            print("-" * 60)

            # Generate the input for this run.
            test_data = self.generate_test_data(num_frames)
            data_size_mb = len(test_data) / (1024 * 1024)

            print(f"数据大小: {data_size_mb:.2f} MB")

            # The vectorized path is file-based, so write a temp source file.
            with tempfile.NamedTemporaryFile(delete=False, suffix='.dat') as temp_source:
                temp_source.write(test_data)
                temp_source_path = temp_source.name

            try:
                # Traditional (in-memory) method.
                print("测试传统方法...")
                traditional_data, traditional_time = self.traditional_convert(test_data, '1024')
                # max(..., 1e-9) guards against a zero elapsed time on
                # very small inputs / coarse clocks.
                traditional_throughput = data_size_mb / max(traditional_time, 1e-9)

                # Vectorized (file-based) method.
                print("测试向量化方法...")
                with tempfile.NamedTemporaryFile(delete=False, suffix='.dat') as temp_target:
                    temp_target_path = temp_target.name

                try:
                    vectorized_result, vectorized_time = self.vectorized_convert(
                        temp_source_path, temp_target_path, '1024'
                    )
                    vectorized_throughput = data_size_mb / max(vectorized_time, 1e-9)

                    # Verify both paths produced identical output.
                    with open(temp_target_path, 'rb') as f:
                        vectorized_data = f.read()

                    data_match = (traditional_data == vectorized_data)

                    speedup = traditional_time / max(vectorized_time, 1e-9)

                    test_result = {
                        'frames': num_frames,
                        'data_size_mb': data_size_mb,
                        'traditional_time': traditional_time,
                        'vectorized_time': vectorized_time,
                        'traditional_throughput': traditional_throughput,
                        'vectorized_throughput': vectorized_throughput,
                        'speedup': speedup,
                        'data_match': data_match
                    }
                    results.append(test_result)

                    print(f"传统方法    : {traditional_time:.3f}s ({traditional_throughput:.1f} MB/s)")
                    print(f"向量化方法  : {vectorized_time:.3f}s ({vectorized_throughput:.1f} MB/s)")
                    print(f"性能提升    : {speedup:.1f}x")
                    print(f"数据一致性  : {'通过' if data_match else '失败'}")
                    print("")

                finally:
                    if os.path.exists(temp_target_path):
                        os.unlink(temp_target_path)

            finally:
                if os.path.exists(temp_source_path):
                    os.unlink(temp_source_path)

        self._print_summary(results)
        return results

    def _print_summary(self, results: list):
        """Print a summary table of all benchmark runs.

        Args:
            results: Result dicts produced by ``run_performance_test``.
        """
        # Nothing to summarize — also avoids a ZeroDivisionError below.
        if not results:
            return

        print("=" * 80)
        print("性能测试汇总")
        print("=" * 80)
        print(f"{'帧数':<12} {'数据大小':<12} {'传统方法':<12} {'向量化方法':<12} {'性能提升':<12} {'一致性':<8}")
        print("-" * 80)

        total_speedup = 0

        for result in results:
            print(f"{result['frames']:<12,} "
                  f"{result['data_size_mb']:<12.2f} "
                  f"{result['traditional_time']:<12.3f} "
                  f"{result['vectorized_time']:<12.3f} "
                  f"{result['speedup']:<12.1f}x "
                  f"{'✓' if result['data_match'] else '✗':<8}")
            total_speedup += result['speedup']

        avg_speedup = total_speedup / len(results)
        print("-" * 80)
        print(f"平均性能提升: {avg_speedup:.1f}x")

        max_vectorized_throughput = max(r['vectorized_throughput'] for r in results)
        print(f"最大向量化吞吐量: {max_vectorized_throughput:.1f} MB/s")
        print("")

    def memory_usage_test(self, file_size_gb: float = 0.1):
        """Measure the memory footprint of the vectorized conversion.

        Args:
            file_size_gb: Approximate size of the generated test file (GB).
        """
        # Lazy imports: psutil is a third-party dependency needed only here.
        import psutil
        import gc

        print(f"内存使用测试 - 文件大小: {file_size_gb:.2f} GB")
        print("-" * 50)

        # Derive the frame count from the requested file size.
        frame_size = self.FRAME_SIZE
        file_size_bytes = int(file_size_gb * 1024 * 1024 * 1024)
        num_frames = file_size_bytes // frame_size

        print("生成测试数据...")
        process = psutil.Process()
        initial_memory = process.memory_info().rss / (1024 * 1024)  # MB

        test_data = self.generate_test_data(num_frames)

        after_generation_memory = process.memory_info().rss / (1024 * 1024)  # MB
        memory_increase = after_generation_memory - initial_memory

        print(f"初始内存: {initial_memory:.1f} MB")
        print(f"生成数据后内存: {after_generation_memory:.1f} MB")
        print(f"内存增长: {memory_increase:.1f} MB")
        print("")

        with tempfile.NamedTemporaryFile(delete=False, suffix='.dat') as temp_source:
            temp_source.write(test_data)
            temp_source_path = temp_source.name

        try:
            # Release the in-memory copy before measuring the converter so
            # its footprint is not attributed to the conversion.
            del test_data
            gc.collect()

            print("测试向量化方法内存使用...")
            before_vectorized = process.memory_info().rss / (1024 * 1024)

            with tempfile.NamedTemporaryFile(delete=False, suffix='.dat') as temp_target:
                temp_target_path = temp_target.name

            try:
                # vectorized_convert constructs its own converter, so no
                # extra FastFrameConverter instance is needed here.
                result, elapsed_time = self.vectorized_convert(
                    temp_source_path, temp_target_path, '1024'
                )

                after_vectorized = process.memory_info().rss / (1024 * 1024)
                vectorized_memory_usage = after_vectorized - before_vectorized

                print(f"向量化方法前内存: {before_vectorized:.1f} MB")
                print(f"向量化方法后内存: {after_vectorized:.1f} MB")
                print(f"向量化方法内存使用: {vectorized_memory_usage:.1f} MB")
                print(f"处理时间: {elapsed_time:.2f}s")
                print(f"吞吐量: {result['throughput_mbps']:.1f} MB/s")

            finally:
                if os.path.exists(temp_target_path):
                    os.unlink(temp_target_path)

        finally:
            if os.path.exists(temp_source_path):
                os.unlink(temp_source_path)

def main():
    """Entry point: run the throughput benchmark, then the memory probe."""
    suite = PerformanceTestSuite()

    print("NumPy向量化帧数据转换性能测试")
    print("")

    # Throughput comparison across three frame counts.
    suite.run_performance_test([1000, 5000, 10000])

    # Memory footprint check on a ~50 MB test file.
    suite.memory_usage_test(0.05)

    print("测试完成!")


# Run the full benchmark suite when executed as a script.
if __name__ == "__main__":
    main()