import onnx
import onnxruntime as ort
import numpy as np
import torch
from collections import deque
from typing import Dict, List, Tuple, Optional
import sys


class ONNXLayeredExecutor:
    def __init__(self, model_path: str, device_id: int = 0):
        """Load an ONNX model and set up per-operator execution on one GPU.

        Args:
            model_path: Path to the .onnx model file.
            device_id: CUDA device index used for every ORT session and
                every pooled GPU buffer.
        """
        self.device_id = device_id
        self.device = f"cuda:{device_id}"

        # Load the original model
        self.original_model = onnx.load(model_path)

        # Prepare the execution environment (shape inference, value info, ...)
        self._prepare_model()

        # Build the execution plan (per-op dependency graph)
        self._build_execution_plan()

        # Create one ORT session per op (in memory, no temp files)
        self.op_sessions = self._create_op_sessions()

        # GPU memory pool, keyed by ONNX tensor name
        self.memory_pool: Dict[str, torch.Tensor] = {}

        # Topological execution order (only ops whose session was created)
        self.execution_order = self._filtered_topological_sort()

    def _prepare_model(self):
        """准备模型：形状推断、值信息收集等"""
        # 尝试进行形状推断
        try:
            from onnx import shape_inference

            self.model = shape_inference.infer_shapes(self.original_model)
        except Exception as e:
            self.model = self.original_model
            print(f"形状推断失败: {str(e)}，使用原始模型")

        # 构建值信息字典
        self.value_info = {}
        for item in self.model.graph.input:
            self.value_info[item.name] = item
        for item in self.model.graph.output:
            self.value_info[item.name] = item
        for item in self.model.graph.value_info:
            self.value_info[item.name] = item

        # 收集所有初始值
        self.initializers = {init.name: init for init in self.model.graph.initializer}

        # 创建完整的模型会话用于参考
        self.full_session = ort.InferenceSession(
            self.model.SerializeToString(),
            providers=[("CUDAExecutionProvider", {"device_id": self.device_id})],
        )

    def _build_execution_plan(self):
        """构建执行计划：识别所有OP及其依赖"""
        self.op_nodes = {}
        self.op_dependencies = {}
        self.op_inputs = {}
        self.op_outputs = {}

        for node in self.model.graph.node:
            # 使用更简单的OP标识符
            op_id = node.name if node.name else f"{node.op_type}_{id(node)}"
            self.op_nodes[op_id] = node

            # 记录输入输出
            self.op_inputs[op_id] = [name for name in node.input if name]
            self.op_outputs[op_id] = node.output

            # 记录依赖关系
            self.op_dependencies[op_id] = []
            for input_name in self.op_inputs[op_id]:
                # 检查输入是否来自其他OP
                for other_id, other_node in self.op_nodes.items():
                    if input_name in self.op_outputs[other_id]:
                        self.op_dependencies[op_id].append(other_id)

    def _filtered_topological_sort(self) -> List[str]:
        """过滤后的拓扑排序：只包含成功创建会话的OP"""
        # 首先获取完整的拓扑排序
        full_order = self._full_topological_sort()

        # 过滤掉未成功创建会话的OP
        return [op_id for op_id in full_order if op_id in self.op_sessions]

    def _full_topological_sort(self) -> List[str]:
        """完整的拓扑排序"""
        in_degree = {op_id: 0 for op_id in self.op_nodes}

        # 计算入度
        for op_id, dependencies in self.op_dependencies.items():
            for dep_id in dependencies:
                if dep_id in in_degree:
                    in_degree[op_id] += 1

        # 初始化队列
        queue = deque()
        for op_id, deg in in_degree.items():
            if deg == 0:
                queue.append(op_id)

        # 执行拓扑排序
        execution_order = []
        while queue:
            op_id = queue.popleft()
            execution_order.append(op_id)

            # 减少依赖该OP的节点的入度
            for other_id in self.op_nodes:
                if op_id in self.op_dependencies[other_id]:
                    in_degree[other_id] -= 1
                    if in_degree[other_id] == 0:
                        queue.append(other_id)

        return execution_order

    def _create_op_sessions(self) -> Dict[str, ort.InferenceSession]:
        """为每个OP创建独立的ONNX Runtime会话（不保存临时文件）"""
        op_sessions = {}
        self.failed_ops = []  # 记录创建失败的OP

        for op_id, node in self.op_nodes.items():
            try:
                # 创建OP子模型
                subgraph = self._create_op_subgraph(node)

                # 直接序列化模型到内存
                model_bytes = subgraph.SerializeToString()

                # 创建会话
                sess_options = ort.SessionOptions()
                sess_options.log_severity_level = 3  # 只显示错误日志

                # 使用GPU提供者
                providers = [("CUDAExecutionProvider", {"device_id": self.device_id})]

                # 直接从字节流创建会话
                session = ort.InferenceSession(
                    model_bytes, sess_options, providers=providers
                )

                op_sessions[op_id] = session
                print(f"✅ 创建OP会话成功: {op_id} ({node.op_type})")

            except Exception as e:
                print(f"❌ 创建OP会话失败: {op_id} ({node.op_type}), 错误: {str(e)}")
                self.failed_ops.append(op_id)

                # 保存问题模型以便调试（可选）
                # debug_path = f"error_{op_id.replace('/', '_')}.onnx"
                # onnx.save(subgraph, debug_path)
                # print(f"  问题模型已保存到: {debug_path}")

        return op_sessions

    def _create_op_subgraph(self, node: onnx.NodeProto) -> onnx.ModelProto:
        """为单个OP创建子图模型（内存中操作）"""
        # 1. 收集输入信息
        inputs = []
        initializers = []
        added_inputs = set()

        for input_name in node.input:
            if not input_name or input_name in added_inputs:
                continue

            # 处理 initializer (权重/常量)
            if input_name in self.initializers:
                init = self.initializers[input_name]
                initializers.append(init)

                # 创建输入张量信息
                tensor_info = onnx.helper.make_tensor_value_info(
                    name=init.name, elem_type=init.data_type, shape=init.dims
                )
                inputs.append(tensor_info)
                added_inputs.add(input_name)
            # 处理数据输入 (非 initializer)
            elif input_name in self.value_info:
                inputs.append(self.value_info[input_name])
                added_inputs.add(input_name)

        # 2. 收集输出信息
        outputs = []
        for output_name in node.output:
            if output_name in self.value_info:
                outputs.append(self.value_info[output_name])
            else:
                # 创建默认输出
                tensor_info = onnx.helper.make_tensor_value_info(
                    name=output_name,
                    elem_type=onnx.TensorProto.FLOAT,
                    shape=None,  # 未知形状
                )
                outputs.append(tensor_info)

        # 3. 创建子图
        graph = onnx.helper.make_graph(
            nodes=[node],
            name=f"subgraph_{node.name}",
            inputs=inputs,
            outputs=outputs,
            initializer=initializers,
        )

        # 4. 创建模型
        model = onnx.helper.make_model(
            graph, producer_name="onnx-layered-executor", ir_version=10
        )
        del model.opset_import[:]  # 清空默认算子集
        model.opset_import.extend(self.model.opset_import)  # 正确复制算子集
        # 5. 复制算子集信息

        return model

    def _get_or_allocate_memory(
        self, tensor_name: str, shape: Tuple[int], dtype=torch.float32
    ) -> torch.Tensor:
        """获取或分配GPU内存"""
        if tensor_name in self.memory_pool:
            existing = self.memory_pool[tensor_name]
            if existing.shape == tuple(shape):
                return existing
            # 形状不匹配，重新分配
            del existing
            self.memory_pool.pop(tensor_name)

        # 分配新内存
        tensor = torch.empty(shape, dtype=dtype, device=self.device)
        self.memory_pool[tensor_name] = tensor
        return tensor

    def _get_tensor_shape(self, tensor_name: str) -> Tuple[int]:
        """获取张量的形状信息"""
        if tensor_name in self.value_info:
            shape = []
            for dim in self.value_info[tensor_name].type.tensor_type.shape.dim:
                if dim.HasField("dim_value"):
                    shape.append(dim.dim_value)
                elif dim.HasField("dim_param"):
                    # 动态维度处理 - 尝试使用合理的默认值
                    if dim.dim_param == "batch_size":
                        shape.append(1)  # 使用batch_size=1
                    else:
                        shape.append(1)  # 默认值
                else:
                    shape.append(1)  # 未知维度
            return tuple(shape)

        # 尝试从完整模型中获取形状
        for input in self.full_session.get_inputs():
            if input.name == tensor_name:
                return tuple(input.shape)
        for output in self.full_session.get_outputs():
            if output.name == tensor_name:
                return tuple(output.shape)

        return (1,)  # 默认形状

    def execute_op(
        self, op_id: str, input_buffers: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """Run a single op through its dedicated ORT session using zero-copy
        GPU IOBinding.

        Args:
            op_id: Key into op_sessions / op_nodes / op_inputs / op_outputs.
            input_buffers: tensor name -> CUDA tensor. Missing inputs are
                looked up in the memory pool and, as a side effect, inserted
                into this dict (it mutates the caller's mapping).

        Returns:
            Mapping of output tensor name -> pooled CUDA tensor that ORT
            wrote into.

        Raises:
            ValueError: If no session exists for *op_id*.
            KeyError: If a required input tensor cannot be found.
        """
        if op_id not in self.op_sessions:
            raise ValueError(f"OP '{op_id}' 的会话未创建，可能初始化失败")

        session = self.op_sessions[op_id]
        node = self.op_nodes[op_id]

        # Prepare zero-copy IO binding
        io_binding = session.io_binding()

        # Bind inputs
        for input_name in self.op_inputs[op_id]:
            # Skip initializers (already embedded in the single-op submodel)
            if input_name in self.initializers:
                continue

            if input_name not in input_buffers:
                # Fall back to the memory pool (note: this inserts into the
                # caller's input_buffers dict)
                if input_name in self.memory_pool:
                    input_buffers[input_name] = self.memory_pool[input_name]
                else:
                    raise KeyError(f"输入 '{input_name}' 在输入缓冲区中找不到")

            tensor = input_buffers[input_name]
            # NOTE(review): element_type is hard-coded to float32 here and
            # for outputs below; non-FP32 tensors would be misinterpreted —
            # confirm the model is all-float32.
            io_binding.bind_input(
                name=input_name,
                device_type="cuda",
                device_id=self.device_id,
                element_type=np.float32,
                shape=tuple(tensor.shape),
                buffer_ptr=tensor.data_ptr(),
            )

        # Bind outputs into pooled GPU buffers
        output_buffers = {}
        for output_name in self.op_outputs[op_id]:
            # Best-effort static shape (unknown dims default to 1, see
            # _get_tensor_shape)
            shape = self._get_tensor_shape(output_name)

            # Get (or allocate) the pooled buffer for this output
            tensor = self._get_or_allocate_memory(output_name, shape)
            output_buffers[output_name] = tensor

            io_binding.bind_output(
                name=output_name,
                device_type="cuda",
                device_id=self.device_id,
                element_type=np.float32,
                shape=tuple(tensor.shape),
                buffer_ptr=tensor.data_ptr(),
            )

        # Execute the op with all buffers pre-bound
        session.run_with_iobinding(io_binding)

        return output_buffers

    def execute_full_model(self, input_data: torch.Tensor) -> torch.Tensor:
        """逐层执行整个模型"""
        # 确保输入在GPU上
        if not input_data.is_cuda:
            input_data = input_data.to(self.device)

        # 获取模型输入名称
        model_input_name = self.full_session.get_inputs()[0].name

        # 初始化缓冲区
        buffers = {model_input_name: input_data}

        # 按拓扑顺序执行所有OP
        for i, op_id in enumerate(self.execution_order):
            print(
                f"执行OP [{i+1}/{len(self.execution_order)}]: {op_id} ({self.op_nodes[op_id].op_type})"
            )

            try:
                # 执行当前OP
                outputs = self.execute_op(op_id, buffers)

                # 更新缓冲区
                buffers.update(outputs)

            except Exception as e:
                print(f"执行OP {op_id} 失败: {str(e)}")
                print(f"当前缓冲区: {list(buffers.keys())}")

                raise RuntimeError("无法继续执行模型")

        # 获取模型输出名称
        model_output_name = self.full_session.get_outputs()[0].name

        return buffers.get(model_output_name, next(iter(buffers.values())))


    def get_model_summary(self):
        """获取模型摘要信息"""
        summary = {
            "total_ops": len(self.op_nodes),
            "successful_ops": len(self.op_sessions),
            "failed_ops": len(self.failed_ops),
            "input_shape": self._get_tensor_shape(
                self.full_session.get_inputs()[0].name
            ),
            "output_shape": self._get_tensor_shape(
                self.full_session.get_outputs()[0].name
            ),
            "execution_order": self.execution_order,
            "failed_op_details": [],
        }

        for op_id in self.failed_ops:
            node = self.op_nodes[op_id]
            summary["failed_op_details"].append(
                {
                    "op_id": op_id,
                    "op_type": node.op_type,
                    "inputs": node.input,
                    "outputs": node.output,
                }
            )

        return summary

    def print_model_summary(self):
        """打印模型摘要信息"""
        summary = self.get_model_summary()

        print("\n===== 模型摘要 =====")
        print(f"总OP数量: {summary['total_ops']}")
        print(f"成功创建会话的OP: {summary['successful_ops']}")
        print(f"创建会话失败的OP: {summary['failed_ops']}")
        print(f"输入形状: {summary['input_shape']}")
        print(f"输出形状: {summary['output_shape']}")

        if summary["failed_ops"] > 0:
            print("\n失败的OP详情:")
            for detail in summary["failed_op_details"]:
                print(f"  - OP ID: {detail['op_id']}")
                print(f"    类型: {detail['op_type']}")
                print(f"    输入: {detail['inputs']}")
                print(f"    输出: {detail['outputs']}")

        print("\n执行顺序:")
        for i, op_id in enumerate(summary["execution_order"]):
            node = self.op_nodes[op_id]
            print(f"  {i+1}. {op_id} ({node.op_type})")


# Usage example
if __name__ == "__main__":
    # Initialize the executor (model path comes from the command line)
    model_path = sys.argv[1]
    executor = ONNXLayeredExecutor(model_path, device_id=0)

    # Print the model summary
    executor.print_model_summary()

    # Prepare random input data matching the model's (static) input shape
    input_shape = executor.get_model_summary()["input_shape"]
    input_data = torch.randn(*input_shape, device=executor.device)

    # Run the layered (per-op) execution path
    output = executor.execute_full_model(input_data).cpu().numpy()
    print(f"\n推理成功! 输出形状: {output.shape}")

    # Cross-check against a vanilla full-model ONNX Runtime session
    ort_session = ort.InferenceSession(model_path)
    # Model's first input name
    input_name = ort_session.get_inputs()[0].name
    # Reference inference, then compare within a loose float tolerance
    outputs = ort_session.run(None, {input_name: input_data.cpu().numpy()})
    np.testing.assert_allclose(outputs[0], output, atol=1e-2)