# src/corrected_inference.py
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import subprocess
import time
import os


class CorrectedTensorRTInference:
    """Thin wrapper around a serialized TensorRT engine.

    Loads the engine, allocates page-locked host / device buffers for every
    binding, and exposes ``infer()`` for synchronous inference on a private
    CUDA stream.

    NOTE(review): written against the legacy binding API
    (``num_bindings`` / ``get_binding_*`` / ``max_batch_size``), which was
    removed in TensorRT 10 — confirm the installed TensorRT version still
    provides it.
    """

    def __init__(self, engine_path, verbose=False):
        """Load the engine at ``engine_path`` and pre-allocate I/O buffers.

        Args:
            engine_path: path to a serialized ``.engine`` file.
            verbose: when True, print engine metadata and per-call timings.
        """
        self.verbose = verbose
        self.logger = trt.Logger(trt.Logger.WARNING)
        self.engine = self.load_engine(engine_path)
        self.context = self.engine.create_execution_context()

        # Allocate host/device buffers for every binding up front.
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()

        if verbose:
            self.print_engine_info()

    def load_engine(self, engine_path):
        """Deserialize and return a TensorRT engine from disk.

        Raises:
            FileNotFoundError: if ``engine_path`` does not exist.
            ValueError: if the file cannot be deserialized into an engine.
        """
        if not os.path.exists(engine_path):
            raise FileNotFoundError(f"引擎文件不存在: {engine_path}")

        try:
            with open(engine_path, 'rb') as f:
                runtime = trt.Runtime(self.logger)
                engine = runtime.deserialize_cuda_engine(f.read())
                if engine is None:
                    raise ValueError("无法反序列化引擎")
                return engine
        except Exception as e:
            print(f"加载引擎失败: {e}")
            raise

    def allocate_buffers(self):
        """Allocate page-locked host and device memory for every binding.

        Returns:
            ``(inputs, outputs, bindings, stream)`` — inputs/outputs are
            lists of per-binding dicts, ``bindings`` is the flat list of
            device pointers passed to ``execute_async_v2``, and ``stream``
            is the CUDA stream used for all copies and execution.
        """
        inputs = []
        outputs = []
        bindings = []

        try:
            # One private stream for all async copies and execution.
            stream = cuda.Stream()

            for i in range(self.engine.num_bindings):
                binding_name = self.engine.get_binding_name(i)
                dtype = self.engine.get_binding_dtype(i)
                shape = self.engine.get_binding_shape(i)

                # Dynamic dimensions (-1) are sized as 1 here; the real
                # shape is pushed per-call via set_binding_shapes().
                # NOTE(review): sizing dynamic dims as 1 may under-allocate
                # for larger runtime shapes — confirm against the engine's
                # optimization profile.
                if -1 in shape:
                    shape = [1 if dim == -1 else dim for dim in shape]

                size = trt.volume(shape) * self.engine.max_batch_size
                nptype = trt.nptype(dtype)

                # Page-locked host memory enables true async H2D/D2H copies.
                host_mem = cuda.pagelocked_empty(size, nptype)

                # Matching device allocation.
                device_mem = cuda.mem_alloc(host_mem.nbytes)
                bindings.append(int(device_mem))

                binding_info = {
                    'name': binding_name,
                    'host': host_mem,
                    'device': device_mem,
                    'shape': shape,
                    'dtype': dtype,
                    'nptype': nptype
                }

                if self.engine.binding_is_input(i):
                    inputs.append(binding_info)
                else:
                    outputs.append(binding_info)

        except Exception as e:
            print(f"分配缓冲区失败: {e}")
            # Fix: expose the partially-built lists to cleanup(); the
            # original called cleanup() before self.outputs existed, so
            # nothing was actually freed on this path.
            self.inputs, self.outputs = inputs, outputs
            self.cleanup()
            raise

        return inputs, outputs, bindings, stream

    def cleanup(self):
        """Free device memory for all bindings.

        Fix: the original freed only output buffers, leaking every input
        device allocation.
        """
        for attr in ('inputs', 'outputs'):
            for binding in getattr(self, attr, []):
                device_mem = binding.get('device')
                if device_mem:
                    try:
                        device_mem.free()
                    except Exception:
                        # Best-effort teardown; ignore driver errors
                        # (e.g. during interpreter shutdown).
                        pass

    def print_engine_info(self):
        """Print engine metadata and a summary of every binding."""
        print("=== TensorRT 引擎信息 ===")
        print(f"引擎名称: {self.engine.name}")
        print(f"最大批次大小: {self.engine.max_batch_size}")
        print(f"设备内存大小: {self.engine.device_memory_size} bytes")
        print(f"绑定数量: {self.engine.num_bindings}")

        for i in range(self.engine.num_bindings):
            name = self.engine.get_binding_name(i)
            shape = self.engine.get_binding_shape(i)
            dtype = self.engine.get_binding_dtype(i)
            is_input = self.engine.binding_is_input(i)
            print(f"  {'Input' if is_input else 'Output'} {i}: {name}, Shape: {shape}, Type: {dtype}")

    def get_gpu_memory_correct(self):
        """Query GPU memory via the CUDA driver.

        Returns:
            ``(used_bytes, total_bytes)``; falls back to nvidia-smi on
            failure, which may return ``(None, None)``.
        """
        try:
            free, total = cuda.mem_get_info()
            used = total - free
            return used, total
        except Exception as e:
            print(f"PyCUDA 内存查询失败: {e}")
            return self.get_gpu_memory_fallback()

    def get_gpu_memory_fallback(self):
        """Best-effort GPU memory query via nvidia-smi.

        Returns:
            ``(used_bytes, total_bytes)``, or ``(None, None)`` when
            nvidia-smi is unavailable or its output cannot be parsed.
        """
        try:
            result = subprocess.check_output([
                'nvidia-smi',
                '--query-gpu=memory.used,memory.total',
                '--format=csv,noheader,nounits'
            ], timeout=5, stderr=subprocess.DEVNULL)

            memory_info = result.decode('utf-8').strip().split(',')
            if len(memory_info) >= 2:
                used_memory = int(memory_info[0].strip()) * 1024 * 1024  # MB to bytes
                total_memory = int(memory_info[1].strip()) * 1024 * 1024
                return used_memory, total_memory
        except Exception:
            # Deliberate best-effort: any failure means "unknown".
            pass

        return None, None

    def set_binding_shapes(self, input_shapes):
        """Push per-call input shapes to the execution context.

        Fix: the original's implicit/explicit-batch branches were byte
        identical, so the dead distinction is removed.

        Returns:
            True on success, False if any shape was rejected.
        """
        try:
            for i, shape in enumerate(input_shapes):
                self.context.set_binding_shape(i, shape)
            return True
        except Exception as e:
            print(f"设置形状失败: {e}")
            return False

    def infer(self, input_data, profile_memory=False):
        """Run one synchronous inference pass.

        Args:
            input_data: a numpy array, or list/tuple of arrays, one per
                input binding (in binding order).
            profile_memory: when True, print the GPU memory delta.

        Returns:
            A single numpy array for single-output engines, otherwise a
            list of arrays. Outputs are reshaped to the resolved binding
            shape when determinable (fix: the original computed the shape
            but never applied it, returning flat 1-D buffers).

        Raises:
            Re-raises any failure after printing a traceback.
        """
        start_time = time.time()

        if profile_memory:
            memory_before = self.get_gpu_memory_correct()

        try:
            if not isinstance(input_data, (list, tuple)):
                input_data = [input_data]

            # Propagate (possibly dynamic) shapes before copying data.
            input_shapes = [data.shape for data in input_data]
            self.set_binding_shapes(input_shapes)

            for i, data in enumerate(input_data):
                # Cast to the binding's expected dtype if necessary.
                if data.dtype != self.inputs[i]['nptype']:
                    data = data.astype(self.inputs[i]['nptype'])

                # Stage in page-locked memory, then async H2D copy.
                np.copyto(self.inputs[i]['host'], data.ravel())
                cuda.memcpy_htod_async(
                    self.inputs[i]['device'],
                    self.inputs[i]['host'],
                    self.stream
                )

            self.context.execute_async_v2(
                bindings=self.bindings,
                stream_handle=self.stream.handle
            )

            # Queue all async D2H copies.
            for output in self.outputs:
                cuda.memcpy_dtoh_async(
                    output['host'],
                    output['device'],
                    self.stream
                )

            # Fix: synchronize BEFORE reading host buffers. The original
            # copied the host buffers while the async D2H transfers could
            # still be in flight — a data race.
            self.stream.synchronize()

            outputs = []
            for i, output in enumerate(self.outputs):
                # NOTE(review): assumes all input bindings precede output
                # bindings (as the original did) — confirm for this engine.
                if not self.engine.has_implicit_batch_dimension:
                    output_shape = self.context.get_binding_shape(i + len(self.inputs))
                else:
                    output_shape = output['shape']

                result = output['host'].copy()
                # Trim and reshape the flat buffer to the resolved shape;
                # buffers may be over-allocated for dynamic engines.
                volume = trt.volume(output_shape)
                if 0 < volume <= result.size:
                    result = result[:volume].reshape(tuple(output_shape))
                outputs.append(result)

            inference_time = time.time() - start_time

            if profile_memory:
                memory_after = self.get_gpu_memory_correct()
                if memory_before[0] is not None and memory_after[0] is not None:
                    memory_used = (memory_after[0] - memory_before[0]) / 1024 / 1024  # MB
                    print(f"推理内存使用: {memory_used:.2f} MB")

            if self.verbose:
                print(f"推理完成，耗时: {inference_time * 1000:.2f} ms")

            return outputs[0] if len(outputs) == 1 else outputs

        except Exception as e:
            print(f"推理失败: {e}")
            import traceback
            traceback.print_exc()
            raise

    def __del__(self):
        """Destructor: release device allocations."""
        self.cleanup()


# Usage example and self-test
def test_corrected_inference():
    """Smoke-test the corrected inference class against the local setup."""
    try:
        # Environment sanity checks first: report library versions.
        print("=== 环境检查 ===")
        import tensorrt as trt
        print(f"TensorRT 版本: {trt.__version__}")

        import pycuda.driver as cuda
        print(f"PyCUDA 版本: {cuda.get_driver_version()}")

        # Verify the driver-level memory query works.
        print("\n=== 内存查询测试 ===")
        free, total = cuda.mem_get_info()
        print(f"GPU 内存: 已使用 {(total - free) / 1024 ** 3:.2f} GB / 总共 {total / 1024 ** 3:.2f} GB")

        engine_path = "../models/model.engine"

        # Guard clause: without an engine file, fall back to generating
        # the sample ONNX model and stop.
        if not os.path.exists(engine_path):
            print(f"\n引擎文件不存在: {engine_path}")
            print("创建示例引擎...")
            create_sample_engine()
            return

        print(f"\n=== 加载引擎: {engine_path} ===")
        engine_wrapper = CorrectedTensorRTInference(engine_path, verbose=True)

        # Build a random input matching the first input binding.
        input_shape = engine_wrapper.inputs[0]['shape']
        print(f"输入形状: {input_shape}")
        dummy_input = np.random.random(input_shape).astype(np.float32)

        # Run one memory-profiled inference pass.
        output = engine_wrapper.infer(dummy_input, profile_memory=True)
        print(f"推理成功! 输出形状: {output.shape}")

    except Exception as e:
        print(f"测试失败: {e}")
        import traceback
        traceback.print_exc()


def create_sample_engine():
    """Create a small, valid ONNX model and save it for engine conversion.

    Graph: GlobalAveragePool -> Flatten -> Gemm.

    Fixes vs. the original:
      * The Gemm node was given a single input, but ONNX Gemm requires at
        least two (A and B); a weight initializer ``W`` is now supplied.
      * GlobalAveragePool preserves rank (output ``[1, 3, 1, 1]``) while
        Gemm requires a rank-2 A matrix; a Flatten node is inserted.
    """
    try:
        import onnx
        from onnx import helper, TensorProto

        print("创建示例 ONNX 模型...")

        # Model input: NCHW image tensor.
        input_shape = [1, 3, 224, 224]
        X = helper.make_tensor_value_info('X', TensorProto.FLOAT, input_shape)

        # Model output: 10-way logits.
        output_shape = [1, 10]
        Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, output_shape)

        # [1, 3, 224, 224] -> [1, 3, 1, 1]
        node1 = helper.make_node(
            'GlobalAveragePool',
            ['X'],
            ['pool_output'],
            name='global_avg_pool'
        )

        # [1, 3, 1, 1] -> [1, 3] so Gemm receives a 2-D A input.
        node_flatten = helper.make_node(
            'Flatten',
            ['pool_output'],
            ['flat_output'],
            name='flatten',
            axis=1
        )

        # With transB=1, B has shape [out_features, in_features] = [10, 3].
        weights = np.zeros((10, 3), dtype=np.float32)
        W = helper.make_tensor('W', TensorProto.FLOAT, [10, 3], weights.ravel().tolist())

        node2 = helper.make_node(
            'Gemm',
            ['flat_output', 'W'],
            ['Y'],
            name='fc',
            alpha=1.0,
            beta=1.0,
            transA=0,
            transB=1
        )

        graph_def = helper.make_graph(
            [node1, node_flatten, node2],
            'simple_model',
            [X],
            [Y],
            initializer=[W]
        )

        model_def = helper.make_model(
            graph_def,
            producer_name='onnx-example'
        )

        # Persist next to the expected engine location.
        onnx_path = "../models/sample_model.onnx"
        os.makedirs(os.path.dirname(onnx_path), exist_ok=True)
        onnx.save(model_def, onnx_path)
        print(f"示例 ONNX 模型已保存: {onnx_path}")

    except ImportError:
        print("ONNX 未安装，跳过示例模型创建")
    except Exception as e:
        print(f"创建示例模型失败: {e}")


if __name__ == "__main__":
    # Run the environment check / demo when executed as a script.
    test_corrected_inference()