# uamcf/adapters/onnx.py
import os
import time
import numpy as np
import tracemalloc
from typing import Dict, Any, Optional, List, Tuple, Union

from .base import ModelAdapter
from ..utils.logger import get_logger


class ONNXAdapter(ModelAdapter):
    """
    ONNX model adapter implementing the ModelAdapter interface.

    Loaded models are represented as a dict with keys:
      - "model":   the onnx.ModelProto
      - "session": an onnxruntime.InferenceSession
      - "path":    the file the model was loaded from (may be None for clones)

    Most methods also accept a raw onnx.ModelProto where a session is not
    required.
    """

    def __init__(self):
        """Initialize the adapter, importing onnx/onnxruntime if available."""
        super().__init__()
        # Pre-set to None so a failed import leaves well-defined attributes
        # (calls then fail with a clear error instead of AttributeError).
        self.onnx = None
        self.ort = None
        try:
            import onnx
            import onnxruntime as ort
            self.onnx = onnx
            self.ort = ort
            self.logger.info("ONNX adapter initialized")
        except ImportError:
            self.logger.error("ONNX/ONNX Runtime not installed. Please install with 'pip install onnx onnxruntime'")

    @staticmethod
    def _unwrap_model(model: Any) -> Any:
        """Return the raw onnx.ModelProto from a model dict (or pass through)."""
        if isinstance(model, dict) and "model" in model:
            return model["model"]
        return model

    def _get_session(self, model: Any) -> Optional[Any]:
        """Return the inference session from a model dict; log and return None if absent."""
        if isinstance(model, dict) and "session" in model:
            return model["session"]
        self.logger.error("Model does not contain a valid session")
        return None

    @staticmethod
    def _tensor_infos(value_infos) -> List[Dict]:
        """
        Extract name/shape entries from a sequence of graph ValueInfoProto
        objects (graph inputs or outputs). Dynamic/symbolic dimensions are
        reported as None.
        """
        infos = []
        for vi in value_infos:
            entry = {"name": vi.name}
            if (hasattr(vi, 'type') and hasattr(vi.type, 'tensor_type')
                    and hasattr(vi.type.tensor_type, 'shape')):
                shape = []
                for dim in vi.type.tensor_type.shape.dim:
                    # dim_value is 0/absent for symbolic dimensions.
                    if hasattr(dim, 'dim_value') and dim.dim_value:
                        shape.append(int(dim.dim_value))
                    else:
                        shape.append(None)
                entry["shape"] = shape
            infos.append(entry)
        return infos

    @staticmethod
    def _attribute_value(attr) -> Any:
        """
        Decode an onnx AttributeProto into a plain Python value.

        Numeric codes follow onnx.AttributeProto.AttributeType. STRING
        attributes are stored as bytes in the proto and are decoded to str
        here so callers get plain Python strings.
        """
        if attr.type == 1:  # FLOAT
            return attr.f
        if attr.type == 2:  # INT
            return attr.i
        if attr.type == 3:  # STRING (bytes in the proto)
            return attr.s.decode('utf-8', errors='replace')
        if attr.type == 4:  # TENSOR
            return "tensor"
        if attr.type == 5:  # GRAPH
            return "graph"
        if attr.type == 6:  # FLOATS
            return list(attr.floats)
        if attr.type == 7:  # INTS
            return list(attr.ints)
        if attr.type == 8:  # STRINGS
            return [s.decode('utf-8', errors='replace') for s in attr.strings]
        return None

    def load_model(self, model_path: str) -> Any:
        """
        Load and validate an ONNX model from disk.

        Args:
            model_path: Path to the .onnx file.

        Returns:
            Model dict ("model"/"session"/"path"), or None on failure.
        """
        try:
            model = self.onnx.load(model_path)

            # Validate before building a session so invalid files fail early.
            self.onnx.checker.check_model(model)

            session = self.ort.InferenceSession(model_path)

            result = {
                "model": model,
                "session": session,
                "path": model_path
            }

            self.logger.info(f"Model loaded from {model_path}")
            return result

        except Exception as e:
            self.logger.error(f"Failed to load model: {str(e)}")
            return None

    def save_model(self, model: Any, save_path: str) -> str:
        """
        Save an ONNX model to disk.

        Args:
            model: Model dict or raw onnx.ModelProto.
            save_path: Destination path.

        Returns:
            The path written, or "" on failure.
        """
        try:
            # os.makedirs("") raises FileNotFoundError, so only create a
            # directory when the path actually contains one.
            directory = os.path.dirname(save_path)
            if directory:
                os.makedirs(directory, exist_ok=True)

            onnx_model = self._unwrap_model(model)

            self.onnx.save(onnx_model, save_path)

            self.logger.info(f"Model saved to {save_path}")
            return save_path

        except Exception as e:
            self.logger.error(f"Failed to save model: {str(e)}")
            return ""

    def get_model_info(self, model: Any) -> Dict:
        """
        Collect basic metadata about an ONNX model.

        Args:
            model: Model dict or raw onnx.ModelProto.

        Returns:
            Dict with type, versions, producer, graph I/O shapes, node count,
            parameter count and serialized size. Missing fields are omitted.
        """
        info = {}

        try:
            onnx_model = self._unwrap_model(model)

            info["type"] = "ONNX"

            if hasattr(onnx_model, 'ir_version'):
                info["ir_version"] = str(onnx_model.ir_version)

            # First opset entry is the default (ai.onnx) domain by convention.
            if hasattr(onnx_model, 'opset_import') and onnx_model.opset_import:
                info["opset_version"] = str(onnx_model.opset_import[0].version)

            if hasattr(onnx_model, 'producer_name') and onnx_model.producer_name:
                info["producer_name"] = onnx_model.producer_name

            if hasattr(onnx_model, 'model_version') and onnx_model.model_version:
                info["model_version"] = str(onnx_model.model_version)

            if hasattr(onnx_model, 'graph'):
                info["node_count"] = len(onnx_model.graph.node)
                info["inputs"] = self._tensor_infos(onnx_model.graph.input)
                info["outputs"] = self._tensor_infos(onnx_model.graph.output)

            info["parameters"] = self.get_parameter_count(model)
            info["size_bytes"] = self.get_model_size(model)

        except Exception as e:
            self.logger.error(f"Failed to get model info: {str(e)}")

        return info

    def get_layers_info(self, model: Any) -> Dict[str, Dict]:
        """
        Describe every node ("layer") in the model graph.

        Args:
            model: Model dict or raw onnx.ModelProto.

        Returns:
            Dict keyed by node name (falling back to "node_{i}" for unnamed
            nodes) with op type, index, I/O names and decoded attributes.
        """
        layers_info = {}

        try:
            onnx_model = self._unwrap_model(model)

            if not hasattr(onnx_model, 'graph') or not hasattr(onnx_model.graph, 'node'):
                return {}

            for i, node in enumerate(onnx_model.graph.node):
                # Node names are optional in ONNX; synthesize a stable key.
                node_name = node.name or f"node_{i}"

                node_info = {
                    "type": node.op_type,
                    "index": i,
                    "inputs": list(node.input),
                    "outputs": list(node.output),
                    "attributes": {attr.name: self._attribute_value(attr)
                                   for attr in node.attribute},
                }

                # Rough marker for parameterized node types; exact counts
                # would require resolving the associated initializers.
                if node.op_type in ["Conv", "ConvTranspose", "Gemm", "MatMul"]:
                    node_info["estimated_parameters"] = True

                layers_info[node_name] = node_info

        except Exception as e:
            self.logger.error(f"Failed to get layers info: {str(e)}")

        return layers_info

    def get_layer(self, model: Any, layer_name: str) -> Any:
        """
        Find a graph node by name.

        Args:
            model: Model dict or raw onnx.ModelProto.
            layer_name: Node name to search for.

        Returns:
            The onnx NodeProto, or None if not found.
        """
        try:
            onnx_model = self._unwrap_model(model)

            if not hasattr(onnx_model, 'graph') or not hasattr(onnx_model.graph, 'node'):
                return None

            for node in onnx_model.graph.node:
                if node.name == layer_name:
                    return node

            self.logger.warning(f"Layer not found: {layer_name}")
            return None

        except Exception as e:
            self.logger.error(f"Failed to get layer: {str(e)}")
            return None

    def replace_layer(self, model: Any, layer_name: str, new_layer: Any) -> bool:
        """
        Replace a graph node in place (conceptual implementation).

        Args:
            model: Model dict or raw onnx.ModelProto (mutated in place).
            layer_name: Name of the node to replace.
            new_layer: Replacement onnx NodeProto.

        Returns:
            True if the node was replaced and the model still validates.
        """
        try:
            # Proper ONNX graph surgery needs specialized tooling; this is a
            # best-effort in-place swap followed by a validity check.
            self.logger.warning(
                "ONNX layer replacement requires specialized tools like ONNX Runtime extensions or external conversion")

            if not new_layer or not isinstance(new_layer, self.onnx.NodeProto):
                self.logger.error("New layer must be an ONNX NodeProto object")
                return False

            onnx_model = self._unwrap_model(model)

            replaced = False
            if hasattr(onnx_model, 'graph') and hasattr(onnx_model.graph, 'node'):
                for i, node in enumerate(onnx_model.graph.node):
                    if node.name == layer_name:
                        # Repeated protobuf fields support remove/insert.
                        onnx_model.graph.node.remove(node)
                        onnx_model.graph.node.insert(i, new_layer)
                        replaced = True
                        break

            if not replaced:
                self.logger.warning(f"Layer not found: {layer_name}")
                return False

            # Reject replacements that break the graph.
            try:
                self.onnx.checker.check_model(onnx_model)
            except Exception as e:
                self.logger.error(f"Modified model is invalid: {str(e)}")
                return False

            return True

        except Exception as e:
            self.logger.error(f"Failed to replace layer: {str(e)}")
            return False

    def generate_sample_input(self, model: Any) -> Any:
        """
        Build a random feed dict matching the session's declared inputs.

        Dynamic dimensions default to 1 for the leading (batch) axis and 64
        elsewhere. Float inputs get float32 noise; int inputs get int64 in
        [0, 10); anything else falls back to float32 with a warning.

        Args:
            model: Model dict (must contain a "session").

        Returns:
            Dict mapping input names to numpy arrays, or None on failure.
        """
        try:
            session = self._get_session(model)
            if session is None:
                return None

            inputs = session.get_inputs()
            if not inputs:
                self.logger.error("Model has no inputs")
                return None

            input_dict = {}
            for spec in inputs:
                # Resolve dynamic dimensions to concrete defaults.
                shape = []
                for dim in spec.shape:
                    if not isinstance(dim, int) or dim <= 0:
                        shape.append(1 if len(shape) == 0 else 64)
                    else:
                        shape.append(dim)

                if "float" in spec.type:
                    input_dict[spec.name] = np.random.randn(*shape).astype(np.float32)
                elif "int" in spec.type:
                    input_dict[spec.name] = np.random.randint(0, 10, size=shape).astype(np.int64)
                else:
                    self.logger.warning(f"Unsupported input type: {spec.type}, using float32")
                    input_dict[spec.name] = np.random.randn(*shape).astype(np.float32)

            return input_dict

        except Exception as e:
            self.logger.error(f"Failed to generate sample input: {str(e)}")
            return None

    def measure_inference_time(self, model: Any, input_data: Any, num_runs: int = 10) -> float:
        """
        Measure average inference latency.

        Args:
            model: Model dict (must contain a "session").
            input_data: Feed dict; if None, a sample input is generated.
            num_runs: Number of timed runs (after 3 warm-up runs).

        Returns:
            Average latency in milliseconds, or 0.0 on failure.
        """
        try:
            session = self._get_session(model)
            if session is None:
                return 0.0

            if input_data is None:
                input_data = self.generate_sample_input(model)
                if input_data is None:
                    self.logger.error("Failed to generate sample input")
                    return 0.0

            # Guard against a zero/negative run count.
            num_runs = max(1, num_runs)

            # Warm-up to absorb first-run allocation/JIT costs.
            for _ in range(3):
                _ = session.run(None, input_data)

            # perf_counter is monotonic and high-resolution, unlike time().
            start_time = time.perf_counter()
            for _ in range(num_runs):
                _ = session.run(None, input_data)
            end_time = time.perf_counter()

            return (end_time - start_time) / num_runs * 1000

        except Exception as e:
            self.logger.error(f"Failed to measure inference time: {str(e)}")
            return 0.0

    def measure_memory_usage(self, model: Any, input_data: Any = None) -> float:
        """
        Measure peak Python-allocator memory during one inference run.

        NOTE: tracemalloc only sees Python-level allocations; native memory
        used inside ONNX Runtime is not captured.

        Args:
            model: Model dict (must contain a "session").
            input_data: Feed dict; if None, a sample input is generated.

        Returns:
            Peak traced memory in MB, or 0.0 on failure.
        """
        try:
            session = self._get_session(model)
            if session is None:
                return 0.0

            if input_data is None:
                input_data = self.generate_sample_input(model)
                if input_data is None:
                    self.logger.error("Failed to generate sample input")
                    return 0.0

            tracemalloc.start()
            try:
                _ = session.run(None, input_data)
                _, peak = tracemalloc.get_traced_memory()
            finally:
                # Always stop tracing, even if inference raises.
                tracemalloc.stop()

            return peak / (1024 * 1024)

        except Exception as e:
            self.logger.error(f"Failed to measure memory usage: {str(e)}")
            return 0.0

    def evaluate_accuracy(self, model: Any, eval_data: Any) -> float:
        """
        Evaluate top-1 classification accuracy.

        Args:
            model: Model dict (must contain a "session").
            eval_data: Tuple of (inputs, labels). ``inputs`` is either a
                list/tuple of per-sample feed dicts or a single batched feed
                dict whose first output has shape (batch, classes).

        Returns:
            Accuracy in [0, 1], or 0.0 on failure.
        """
        try:
            session = self._get_session(model)
            if session is None:
                return 0.0

            if not isinstance(eval_data, tuple) or len(eval_data) != 2:
                self.logger.error("eval_data should be a tuple of (inputs, labels)")
                return 0.0

            inputs, labels = eval_data

            if isinstance(inputs, (list, tuple)):
                total = len(inputs)
                if total == 0:
                    # Avoid ZeroDivisionError on an empty evaluation set.
                    self.logger.error("eval_data contains no samples")
                    return 0.0

                correct = 0
                for sample, label in zip(inputs, labels):
                    outputs = session.run(None, sample)
                    if np.argmax(outputs[0]) == label:
                        correct += 1

                accuracy = correct / total

            elif isinstance(inputs, dict):
                outputs = session.run(None, inputs)
                predictions = np.argmax(outputs[0], axis=1)
                accuracy = np.mean(predictions == labels)

            else:
                self.logger.error("Unsupported eval_data format")
                return 0.0

            return float(accuracy)

        except Exception as e:
            self.logger.error(f"Failed to evaluate accuracy: {str(e)}")
            return 0.0

    def fine_tune(self, model: Any, train_data: Any, iterations: int = 5,
                  early_stopping: bool = True) -> Any:
        """
        Fine-tuning is not supported for serialized ONNX models.

        Args:
            model: ONNX model (returned unchanged).
            train_data: Ignored.
            iterations: Ignored.
            early_stopping: Ignored.

        Returns:
            The model, unchanged.
        """
        self.logger.error("Fine-tuning is not directly supported for ONNX models. Convert to source framework first.")
        return model

    def get_model_size(self, model: Any) -> int:
        """
        Get the model size in bytes.

        Prefers the on-disk file size; falls back to the serialized
        protobuf size.

        Args:
            model: Model dict or raw onnx.ModelProto.

        Returns:
            Size in bytes, or 0 on failure.
        """
        try:
            # "path" may be None (e.g. for clones) — guard before os calls.
            if isinstance(model, dict) and "path" in model:
                model_path = model["path"]
                if model_path and os.path.exists(model_path):
                    return os.path.getsize(model_path)

            onnx_model = self._unwrap_model(model)

            # SerializeToString does not mutate the proto, so no defensive
            # copy is required.
            return len(onnx_model.SerializeToString())

        except Exception as e:
            self.logger.error(f"Failed to get model size: {str(e)}")
            return 0

    def get_parameter_count(self, model: Any) -> int:
        """
        Count parameters by summing the element counts of all initializers.

        Args:
            model: Model dict or raw onnx.ModelProto.

        Returns:
            Total parameter count, or 0 on failure.
        """
        try:
            onnx_model = self._unwrap_model(model)

            param_count = 0

            if hasattr(onnx_model, 'graph') and hasattr(onnx_model.graph, 'initializer'):
                for initializer in onnx_model.graph.initializer:
                    # Product of dims; an empty dims list (scalar) counts as 1.
                    num_elements = 1
                    for dim in initializer.dims:
                        num_elements *= dim
                    param_count += num_elements

            return param_count

        except Exception as e:
            self.logger.error(f"Failed to get parameter count: {str(e)}")
            return 0

    def measure_layer_latency(self, model: Any, sample_input: Any) -> Dict[str, float]:
        """
        Per-layer latency is not available through the plain ORT run API.

        Args:
            model: ONNX model (unused).
            sample_input: Sample input (unused).

        Returns:
            Empty dict.
        """
        self.logger.warning("Layer-wise latency measurement is not directly supported in ONNX Runtime")
        return {}

    def clone_model(self, model: Any) -> Any:
        """
        Deep-copy an ONNX model, rebuilding its inference session if present.

        Args:
            model: Model dict or raw onnx.ModelProto.

        Returns:
            A new model dict; on failure the original object is returned.
        """
        try:
            if isinstance(model, dict) and "model" in model:
                onnx_model = model["model"]
                session = model.get("session")
                path = model.get("path")
            else:
                onnx_model = model
                session = None
                path = None

            # Protobuf CopyFrom performs a deep copy of the whole graph.
            new_model = self.onnx.ModelProto()
            new_model.CopyFrom(onnx_model)

            # Sessions cannot be copied; rebuild from the serialized bytes.
            new_session = None
            if session:
                model_bytes = new_model.SerializeToString()
                new_session = self.ort.InferenceSession(model_bytes)

            return {
                "model": new_model,
                "session": new_session,
                "path": path
            }

        except Exception as e:
            self.logger.error(f"Failed to clone model: {str(e)}")
            return model

    def export_model(self, model: Any, output_path: str, format: str = "native") -> str:
        """
        Export the model to a file.

        Args:
            model: Model dict or raw onnx.ModelProto.
            output_path: Destination path.
            format: "native" or "onnx" (only ONNX format is supported).

        Returns:
            The exported file path, or "" on failure.
        """
        try:
            # os.makedirs("") raises, so only create a directory when the
            # path actually contains one.
            directory = os.path.dirname(output_path)
            if directory:
                os.makedirs(directory, exist_ok=True)

            onnx_model = self._unwrap_model(model)

            if format in ("native", "onnx"):
                self.onnx.save(onnx_model, output_path)
                return output_path
            else:
                self.logger.error(f"Unsupported export format for ONNX: {format}")
                return ""

        except Exception as e:
            self.logger.error(f"Failed to export model: {str(e)}")
            return ""