import os
import sys
from pathlib import Path
# Make the project root importable so `config.path_config` resolves when this
# file is run directly as a script (not only as part of the package).
PROJECT_ROOT_STR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if PROJECT_ROOT_STR not in sys.path:
    sys.path.insert(0, PROJECT_ROOT_STR)
    
from config.path_config import MODELS_DIR,DATA_DIR
import numpy as np
import time
from typing import Optional, Tuple, List
import logging
import gc

# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ModelBenchmark:
    """Measures inference latency and throughput for ONNX / TensorRT models."""

    def __init__(self, warmup: int = 10, repeat: int = 100, device: str = 'cuda'):
        """
        Initialize benchmark parameters.

        Args:
            warmup: Number of warm-up iterations (excluded from timing).
            repeat: Number of timed iterations.
            device: Execution device ('cuda' or 'cpu').
        """
        self.warmup = warmup
        self.repeat = repeat
        self.device = device

    def benchmark_onnx(self, onnx_path: str, batch_size: int = 1) -> dict:
        """
        Benchmark an ONNX model with onnxruntime.

        Args:
            onnx_path: Path to the ONNX model file.
            batch_size: Batch size substituted into dynamic dimensions.

        Returns:
            Dict with model path, average latency (ms), FPS and batch size.

        Raises:
            ImportError: If onnxruntime is not installed.
        """
        try:
            import onnxruntime as ort
        except ImportError:
            raise ImportError("onnxruntime not installed. Please install it.")

        # Create the inference session on the requested device.
        providers = ['CUDAExecutionProvider'] if self.device == 'cuda' else ['CPUExecutionProvider']
        session = ort.InferenceSession(onnx_path, providers=providers)

        # Resolve the (possibly dynamic) input shape and build random test data.
        input_info = session.get_inputs()[0]
        input_shape = self._resolve_dynamic_shape(input_info.shape, batch_size)
        dummy_input = np.random.randn(*input_shape).astype(np.float32)

        # Warm-up runs (not timed).
        self._warmup_onnx(session, input_info.name, dummy_input)

        # Timed runs.
        avg_time, fps = self._benchmark_onnx(session, input_info.name, dummy_input)

        result = {
            'model_path': onnx_path,
            'avg_time_ms': avg_time,
            'fps': fps,
            'batch_size': batch_size
        }

        logger.info(f'[ONNX] {os.path.basename(onnx_path)} avg: {avg_time:.2f} ms, FPS: {fps:.2f}')
        return result

    def benchmark_tensorrt(self, engine_path: str, batch_size: int = 1) -> dict:
        """
        Benchmark a serialized TensorRT engine.

        Args:
            engine_path: Path to the TensorRT engine file.
            batch_size: Batch size substituted into dynamic dimensions.

        Returns:
            Dict with model path, average latency (ms), FPS and batch size.

        Raises:
            ImportError: If tensorrt or pycuda is not installed.
            RuntimeError: If the engine or execution context cannot be created.
        """
        try:
            import tensorrt as trt
            import pycuda.driver as cuda
            import pycuda.autoinit  # noqa: F401 -- side effect: initializes a CUDA context
        except ImportError:
            raise ImportError("tensorrt or pycuda not installed.")

        engine = None
        context = None
        d_input = None
        d_output = None

        try:
            # Deserialize the engine and create an execution context.
            trt_logger = trt.Logger(trt.Logger.WARNING)
            engine, context = self._load_tensorrt_engine(engine_path, trt_logger)

            if context is None:
                raise RuntimeError("Failed to create TensorRT execution context")

            # Resolve IO tensor names/shapes and create host-side buffers.
            io_info = self._setup_tensorrt_io(engine, context, batch_size)
            if io_info is None:
                raise RuntimeError("Failed to setup TensorRT IO")

            # Allocate GPU memory for input and output.
            gpu_buffers = self._allocate_gpu_memory(io_info, cuda)
            # BUG FIX: expose the device buffers to the cleanup code in the
            # `finally` block. Previously d_input/d_output were never assigned,
            # so the frees below were no-ops and the GPU allocations leaked.
            d_input = gpu_buffers['d_input']
            d_output = gpu_buffers['d_output']

            # Warm-up runs (not timed).
            self._warmup_tensorrt(context, io_info, gpu_buffers, cuda)

            # Timed runs.
            avg_time, fps = self._benchmark_tensorrt(context, io_info, gpu_buffers, cuda)

            result = {
                'model_path': engine_path,
                'avg_time_ms': avg_time,
                'fps': fps,
                'batch_size': batch_size
            }

            logger.info(f'[TensorRT] {os.path.basename(engine_path)} avg: {avg_time:.2f} ms, FPS: {fps:.2f}')
            return result

        finally:
            # Release resources in reverse acquisition order. Each free is
            # best-effort so one failure does not mask the others.
            try:
                if d_input is not None:
                    d_input.free()
            except Exception as e:
                logger.debug(f"Error freeing d_input: {e}")

            try:
                if d_output is not None:
                    d_output.free()
            except Exception as e:
                logger.debug(f"Error freeing d_output: {e}")

            # Drop references so the native TensorRT objects can be destroyed.
            if context is not None:
                del context
            if engine is not None:
                del engine

            # Force garbage collection to reclaim native resources promptly.
            gc.collect()

    def _resolve_dynamic_shape(self, shape: List, batch_size: int) -> List[int]:
        """Replace dynamic dimensions with the requested batch size.

        onnxruntime reports dynamic dims as strings (or occasionally None),
        while TensorRT uses -1; all of those are mapped to batch_size.
        """
        # BUG FIX: also treat None as dynamic -- previously a None dim fell
        # through and crashed np.random.randn(*shape).
        return [batch_size if (d is None or isinstance(d, str) or d == -1) else d for d in shape]

    def _warmup_onnx(self, session, input_name: str, dummy_input: np.ndarray):
        """Run `self.warmup` untimed inference passes on an ONNX session."""
        for _ in range(self.warmup):
            session.run(None, {input_name: dummy_input})

    def _benchmark_onnx(self, session, input_name: str, dummy_input: np.ndarray) -> Tuple[float, float]:
        """Time `self.repeat` ONNX inference passes; return (avg_ms, fps)."""
        start_time = time.perf_counter()
        for _ in range(self.repeat):
            session.run(None, {input_name: dummy_input})
        elapsed_time = time.perf_counter() - start_time

        avg_time = elapsed_time / self.repeat * 1000  # convert to milliseconds
        fps = self.repeat / elapsed_time

        return avg_time, fps

    def _load_tensorrt_engine(self, engine_path: str, trt_logger):
        """Deserialize a TensorRT engine and create its execution context.

        Returns:
            (engine, context) tuple.

        Raises:
            RuntimeError: If the engine file cannot be deserialized.
        """
        import tensorrt as trt

        with open(engine_path, 'rb') as f, trt.Runtime(trt_logger) as runtime:
            engine = runtime.deserialize_cuda_engine(f.read())

        # deserialize_cuda_engine returns None on failure; fail early with a
        # clear error instead of an AttributeError on the next line.
        if engine is None:
            raise RuntimeError(f"Failed to deserialize TensorRT engine: {engine_path}")

        context = engine.create_execution_context()
        return engine, context

    def _setup_tensorrt_io(self, engine, context, batch_size: int) -> dict:
        """Collect IO tensor metadata and host buffers (single input/output).

        Returns a dict with tensor indices/names, a random host input array,
        an empty host output array, and the total IO tensor count.
        """
        import tensorrt as trt

        # Partition IO tensors into inputs and outputs.
        input_indices = []
        output_indices = []

        for i in range(engine.num_io_tensors):
            name = engine.get_tensor_name(i)
            mode = engine.get_tensor_mode(name)
            if int(mode) == int(trt.TensorIOMode.INPUT):
                input_indices.append((i, name))
            elif int(mode) == int(trt.TensorIOMode.OUTPUT):
                output_indices.append((i, name))

        # Assume a single input and single output; take the first of each.
        input_idx, input_name = input_indices[0]
        output_idx, output_name = output_indices[0]

        # Bind a concrete input shape when the engine declares dynamic dims.
        input_shape = list(engine.get_tensor_shape(input_name))
        if -1 in input_shape:
            input_shape = [batch_size if d == -1 else d for d in input_shape]
            context.set_input_shape(input_name, input_shape)

        # Query the (now resolved) output shape from the context.
        output_shape = list(context.get_tensor_shape(output_name))
        if -1 in output_shape:
            output_shape = [batch_size if d == -1 else d for d in output_shape]

        # Host-side buffers: random input, uninitialized output.
        dummy_input = np.random.randn(*input_shape).astype(np.float32)
        output_array = np.empty(output_shape, dtype=np.float32)

        return {
            'input_idx': input_idx,
            'output_idx': output_idx,
            'input_name': input_name,
            'output_name': output_name,
            'dummy_input': dummy_input,
            'output_array': output_array,
            'num_io_tensors': engine.num_io_tensors
        }

    def _setup_tensorrt_io_multi(self, engine, context, batch_size: int) -> dict:
        """Multi-input/multi-output variant of `_setup_tensorrt_io`.

        NOTE(review): currently unused by the benchmark paths above; kept as
        the extension point for engines with more than one IO tensor.
        """
        import tensorrt as trt

        inputs_info = []
        outputs_info = []

        for i in range(engine.num_io_tensors):
            name = engine.get_tensor_name(i)
            mode = engine.get_tensor_mode(name)
            shape = list(engine.get_tensor_shape(name))

            if int(mode) == int(trt.TensorIOMode.INPUT):
                if -1 in shape:
                    shape = [batch_size if d == -1 else d for d in shape]
                    context.set_input_shape(name, shape)

                inputs_info.append({
                    'idx': i,
                    'name': name,
                    'shape': shape,
                    'data': np.random.randn(*shape).astype(np.float32)
                })
            elif int(mode) == int(trt.TensorIOMode.OUTPUT):
                # Re-query from the context so dynamic dims reflect the
                # input shapes bound above.
                shape = list(context.get_tensor_shape(name))
                if -1 in shape:
                    shape = [batch_size if d == -1 else d for d in shape]

                outputs_info.append({
                    'idx': i,
                    'name': name,
                    'shape': shape,
                    'data': np.empty(shape, dtype=np.float32)
                })

        return {
            'inputs': inputs_info,
            'outputs': outputs_info,
            'num_io_tensors': engine.num_io_tensors
        }

    def _allocate_gpu_memory(self, io_info: dict, cuda) -> dict:
        """Allocate device buffers sized from the host arrays in `io_info`.

        Returns a dict with the two device allocations plus the `bindings`
        list (device pointers indexed by IO tensor position) that
        `execute_v2` expects.
        """
        d_input = cuda.mem_alloc(io_info['dummy_input'].nbytes)
        d_output = cuda.mem_alloc(io_info['output_array'].nbytes)

        # Bindings array: one device pointer slot per IO tensor.
        bindings = [None] * io_info['num_io_tensors']
        bindings[io_info['input_idx']] = int(d_input)
        bindings[io_info['output_idx']] = int(d_output)

        return {
            'd_input': d_input,
            'd_output': d_output,
            'bindings': bindings
        }

    def _warmup_tensorrt(self, context, io_info: dict, gpu_buffers: dict, cuda):
        """Run `self.warmup` untimed host->device->host inference passes."""
        for _ in range(self.warmup):
            cuda.memcpy_htod(gpu_buffers['d_input'], io_info['dummy_input'])
            context.execute_v2(gpu_buffers['bindings'])
            cuda.memcpy_dtoh(io_info['output_array'], gpu_buffers['d_output'])

    def _benchmark_tensorrt(self, context, io_info: dict, gpu_buffers: dict, cuda) -> Tuple[float, float]:
        """Time `self.repeat` TensorRT passes (H2D + execute + D2H); return (avg_ms, fps)."""
        start_time = time.perf_counter()
        for _ in range(self.repeat):
            cuda.memcpy_htod(gpu_buffers['d_input'], io_info['dummy_input'])
            context.execute_v2(gpu_buffers['bindings'])
            cuda.memcpy_dtoh(io_info['output_array'], gpu_buffers['d_output'])
        elapsed_time = time.perf_counter() - start_time

        avg_time = elapsed_time / self.repeat * 1000  # convert to milliseconds
        fps = self.repeat / elapsed_time

        return avg_time, fps

    def _cleanup_gpu_memory(self, gpu_buffers: dict, cuda):
        """Free both device buffers allocated by `_allocate_gpu_memory`."""
        gpu_buffers['d_input'].free()
        gpu_buffers['d_output'].free()

class ModelBenchmarkRunner:
    """Drives ModelBenchmark across one or more model files."""

    def __init__(self, warmup: int = 10, repeat: int = 100, device: str = 'cuda'):
        """
        Create the runner and its underlying benchmark helper.

        Args:
            warmup: Number of warm-up iterations.
            repeat: Number of timed iterations.
            device: Execution device ('cuda' or 'cpu').
        """
        self.benchmark = ModelBenchmark(warmup, repeat, device)

    @staticmethod
    def get_model_type(model_path: str) -> str:
        """
        Classify a model file by its extension.

        Args:
            model_path: Path to the model file.

        Returns:
            'onnx' for .onnx files, 'tensorrt' for .engine files.

        Raises:
            ValueError: For any other extension.
        """
        ext = model_path.lower().split('.')[-1]
        ext_to_type = {'onnx': 'onnx', 'engine': 'tensorrt'}
        if ext not in ext_to_type:
            raise ValueError(f"Unsupported model type: {ext}")
        return ext_to_type[ext]

    def run_single_model(self, model_path: str, batch_size: int = 1) -> dict:
        """
        Benchmark one model file, dispatching on its detected type.

        Args:
            model_path: Path to the model file.
            batch_size: Batch size for the benchmark.

        Returns:
            Result dict produced by the matching benchmark method.

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model file not found: {model_path}")

        dispatch = {
            'onnx': self.benchmark.benchmark_onnx,
            'tensorrt': self.benchmark.benchmark_tensorrt,
        }
        bench_fn = dispatch.get(self.get_model_type(model_path))
        return bench_fn(model_path, batch_size) if bench_fn is not None else None

    def run_comparison(self, model_paths: List[str], batch_size: int = 1) -> List[dict]:
        """
        Benchmark several models, skipping (and logging) any that fail.

        Args:
            model_paths: Paths of the model files to test.
            batch_size: Batch size for every benchmark.

        Returns:
            Result dicts for every model that benchmarked successfully.
        """
        collected = []
        for model_path in model_paths:
            try:
                collected.append(self.run_single_model(model_path, batch_size))
            except Exception as e:
                # Keep going so one bad model does not abort the comparison.
                logger.error(f"Error testing {model_path}: {str(e)}")
        return collected

    def print_comparison_results(self, results: List[dict]):
        """Pretty-print the collected benchmark results to stdout."""
        if not results:
            logger.warning("No results to display")
            return

        banner = "=" * 80
        print("\n" + banner)
        print("MODEL PERFORMANCE COMPARISON")
        print(banner)

        for rank, entry in enumerate(results, start=1):
            model_name = os.path.basename(entry['model_path'])
            print(f"{rank}. {model_name}")
            print(f"   Average Time: {entry['avg_time_ms']:.2f} ms")
            print(f"   FPS: {entry['fps']:.2f}")
            print(f"   Batch Size: {entry['batch_size']}")
            print("-" * 40)

def main():
    """Example entry point: compare an ONNX model against a TensorRT engine."""
    # Model artifact locations, resolved relative to the project's MODELS_DIR.
    # (The unused COCO data/label paths previously built here were removed.)
    ONNX_PATH = os.path.join(MODELS_DIR, 'onnx/yolov11n.onnx')
    ENGINE_PATH = os.path.join(MODELS_DIR, 'engine/yolov11n_int8.engine')

    model_paths = [
        ONNX_PATH,
        ENGINE_PATH
    ]

    # Build the benchmark runner.
    runner = ModelBenchmarkRunner(warmup=10, repeat=100, device='cuda')

    try:
        # Run the side-by-side performance comparison.
        results = runner.run_comparison(model_paths, batch_size=1)

        # Report the results.
        runner.print_comparison_results(results)

    except Exception as e:
        logger.error(f"Benchmark failed: {str(e)}")

if __name__ == "__main__":
    main()