# uamcf/loaders/model_loader.py
import os
import torch
import logging
from typing import Any, Dict, Optional, Tuple

# Module-level logger with a fixed name ("ModelLoader" rather than __name__);
# NOTE(review): handlers/filters configured by dotted module path won't match this.
logger = logging.getLogger("ModelLoader")


class ModelLoader:
    """Generic model loader supporting multiple serialization formats.

    Dispatches on the file extension:
      - ``.pth`` / ``.pt``  -> PyTorch (full ``nn.Module`` or bare state dict)
      - ``.onnx``           -> ONNX
      - ``.pb`` / ``.tflite`` -> TensorFlow SavedModel / TFLite

    Each ``load`` returns a ``(model, format_type, metadata)`` tuple.
    """

    def __init__(self, device=None):
        # Default to CUDA when available, otherwise fall back to CPU.
        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)

        logger.info(f"模型加载器初始化，设备: {self.device}")

    def load(self, model_path: str) -> Tuple[Any, str, Dict]:
        """
        Load a model file, dispatching on its extension.

        Args:
            model_path: Path to the model file.

        Returns:
            model: The loaded model object (framework-specific), or a raw
                state dict when only weights are present and no architecture
                can be inferred.
            format_type: One of ``"pytorch"``, ``"pytorch_weights"``,
                ``"onnx"``, ``"tensorflow"``, ``"tflite"``.
            metadata: Format-specific metadata dictionary.

        Raises:
            FileNotFoundError: If ``model_path`` does not exist.
            ValueError: If the extension is not a supported format.
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")

        # Choose the loader based on the (lower-cased) file extension.
        ext = os.path.splitext(model_path)[1].lower()

        if ext in ('.pth', '.pt'):
            return self._load_pytorch(model_path)
        elif ext == '.onnx':
            return self._load_onnx(model_path)
        elif ext in ('.pb', '.tflite'):
            return self._load_tensorflow(model_path)
        else:
            raise ValueError(f"不支持的模型格式: {ext}")

    def _load_pytorch(self, model_path: str) -> Tuple[Any, str, Dict]:
        """Load a PyTorch checkpoint (full module or bare state dict).

        SECURITY NOTE(review): ``torch.load`` unpickles arbitrary objects and
        can execute code — only load checkpoints from trusted sources.

        Raises:
            ValueError: If the deserialized object is neither an
                ``nn.Module`` nor a dict.
        """
        try:
            # Deserialize ONCE and dispatch on the object's type.  The
            # previous implementation called torch.load twice (once probing
            # for a full module, once for a state dict), reading and
            # unpickling the file two times.
            obj = torch.load(model_path, map_location=self.device)

            if isinstance(obj, torch.nn.Module):
                logger.info(f"成功加载完整PyTorch模型: {model_path}")
                model = obj.to(self.device)
                # Consistency fix: put the model in inference mode, matching
                # the inferred-architecture path below.
                model.eval()
                return model, "pytorch", {"has_architecture": True}

            if isinstance(obj, dict):
                # Only weights are available — try to infer an architecture.
                architecture = self._infer_architecture_from_weights(obj)
                if architecture:
                    model = architecture()
                    model.load_state_dict(obj)
                    model = model.to(self.device)
                    model.eval()
                    logger.info(f"加载PyTorch权重并推断架构: {model_path}")
                    return model, "pytorch", {"has_architecture": False, "inferred": True}

                logger.error("无法从权重推断模型架构")
                # Hand back the raw state dict so downstream code can decide.
                return obj, "pytorch_weights", {"has_architecture": False}

            raise ValueError("无法识别的PyTorch模型格式")

        except Exception as e:
            logger.error(f"加载PyTorch模型失败: {str(e)}")
            raise

    def _load_onnx(self, model_path: str) -> Tuple[Any, str, Dict]:
        """Load an ONNX model and extract its graph metadata.

        Raises:
            ImportError: If the ``onnx`` package is not installed.
        """
        try:
            import onnx
            model = onnx.load(model_path)
            logger.info(f"成功加载ONNX模型: {model_path}")
            return model, "onnx", self._extract_onnx_metadata(model)
        except ImportError:
            logger.error("ONNX不可用。请安装onnx库: pip install onnx")
            raise
        except Exception as e:
            logger.error(f"加载ONNX模型失败: {str(e)}")
            raise

    def _load_tensorflow(self, model_path: str) -> Tuple[Any, str, Dict]:
        """Load a TensorFlow SavedModel or a TFLite flatbuffer.

        ``.tflite`` files yield an allocated ``tf.lite.Interpreter``; anything
        else is treated as a SavedModel directory/file.

        Raises:
            ImportError: If the ``tensorflow`` package is not installed.
        """
        try:
            import tensorflow as tf
            if model_path.endswith('.tflite'):
                interpreter = tf.lite.Interpreter(model_path=model_path)
                interpreter.allocate_tensors()
                logger.info(f"成功加载TFLite模型: {model_path}")
                return interpreter, "tflite", {}
            else:
                model = tf.saved_model.load(model_path)
                logger.info(f"成功加载TensorFlow模型: {model_path}")
                return model, "tensorflow", {}
        except ImportError:
            logger.error("TensorFlow不可用。请安装tensorflow库: pip install tensorflow")
            raise
        except Exception as e:
            logger.error(f"加载TensorFlow模型失败: {str(e)}")
            raise

    def _infer_architecture_from_weights(self, state_dict: Dict) -> Optional[Any]:
        """
        Heuristically infer a torchvision architecture from a state dict.

        Recognizes ResNet-{18,34,50,101} (by total parameter count) and
        VGG-{11,16} (by number of entries).  Returns a zero-argument factory
        that builds an uninitialized model with the inferred ``num_classes``,
        or ``None`` when the architecture cannot be identified (including
        when torchvision is not installed — the ImportError is swallowed
        by the broad except below and logged).
        """
        try:
            from torchvision import models as tv_models

            layer_names = list(state_dict.keys())

            # ResNet family: characteristic "layerN.M.convK" blocks plus an
            # "fc" classification head.
            if 'layer1.0.conv1.weight' in state_dict and 'fc.weight' in state_dict:
                fc_shape = state_dict['fc.weight'].shape
                num_classes = fc_shape[0]

                # Disambiguate depth by total parameter count
                # (~11.7M / ~21.8M / ~25.6M / ~44.5M for 18/34/50/101).
                total_params = sum(p.numel() for p in state_dict.values())

                if total_params < 15_000_000:
                    logger.info(f"推断为ResNet18，输出类别数: {num_classes}")
                    model_class = tv_models.resnet18
                elif total_params < 30_000_000:
                    logger.info(f"推断为ResNet34，输出类别数: {num_classes}")
                    model_class = tv_models.resnet34
                elif total_params < 50_000_000:
                    logger.info(f"推断为ResNet50，输出类别数: {num_classes}")
                    model_class = tv_models.resnet50
                else:
                    logger.info(f"推断为ResNet101，输出类别数: {num_classes}")
                    model_class = tv_models.resnet101

                return lambda: model_class(num_classes=num_classes)

            # VGG family: a "features.*" conv stack and a "classifier.6" head.
            elif 'features.0.weight' in state_dict and 'classifier.6.weight' in state_dict:
                classifier_shape = state_dict['classifier.6.weight'].shape
                num_classes = classifier_shape[0]

                # Crude depth heuristic: VGG11 has far fewer entries than VGG16.
                if len(layer_names) < 100:
                    logger.info(f"推断为VGG11，输出类别数: {num_classes}")
                    model_class = tv_models.vgg11
                else:
                    logger.info(f"推断为VGG16，输出类别数: {num_classes}")
                    model_class = tv_models.vgg16

                return lambda: model_class(num_classes=num_classes)

            # TODO: add detection for more families (MobileNet, EfficientNet,
            # BERT, ...).

            logger.warning("无法识别的模型架构，请手动指定")
            return None

        except Exception as e:
            logger.error(f"推断架构失败: {str(e)}")
            return None

    def _extract_onnx_metadata(self, model) -> Dict:
        """Extract I/O names, operator histogram and opset version from an
        ONNX ``ModelProto``.  Best-effort: failures are logged and the
        partially-filled dict is returned."""
        metadata = {}
        try:
            # Graph input/output tensor names.  (Renamed loop variables so we
            # no longer shadow the builtins ``input``/``output``.)
            metadata["inputs"] = [inp.name for inp in model.graph.input]
            metadata["outputs"] = [out.name for out in model.graph.output]

            # Histogram of operator types used in the graph.
            op_types = {}
            for node in model.graph.node:
                op_type = node.op_type
                op_types[op_type] = op_types.get(op_type, 0) + 1
            metadata["operators"] = op_types

            # First opset import is conventionally the default ONNX domain.
            metadata["opset_version"] = model.opset_import[0].version

        except Exception as e:
            logger.warning(f"提取ONNX元数据时出错: {str(e)}")

        return metadata