from typing import Dict, Type, Any, List

import torch
import torch.nn as nn


class ModelRegistry:
    """Plugin-style model registry mapping string names to nn.Module classes."""

    # Class-level mapping shared by all users of the registry.
    _models: Dict[str, Type[nn.Module]] = {}

    @classmethod
    def register(cls, model_name: str) -> Any:
        """Return a class decorator that registers the class under ``model_name``.

        Raises:
            ValueError: if ``model_name`` is already registered.
        """

        def wrapper(model_class: Type[nn.Module]) -> Type[nn.Module]:
            if model_name in cls._models:
                raise ValueError(f"模型名称 '{model_name}' 已存在")
            cls._models[model_name] = model_class
            return model_class

        return wrapper

    @classmethod
    def get_model(cls, model_name: str) -> Type[nn.Module]:
        """Look up a registered model class by name.

        Raises:
            ValueError: if no model is registered under ``model_name``.
        """
        model_class = cls._models.get(model_name)
        # Explicit `is None`: a class object is always truthy, so a truthiness
        # test reads as a value check when it is really a presence check.
        if model_class is None:
            raise ValueError(f"未找到模型 '{model_name}'，已注册模型: {list(cls._models.keys())}")
        return model_class

    @classmethod
    def list_models(cls) -> List[str]:
        """Return the names of all registered models."""
        return list(cls._models.keys())


# Register the existing and newly added models with the registry
@ModelRegistry.register("price_mlp")
class PriceMLP(nn.Module):
    """MLP price-prediction model with an optional feature-attention gate.

    Architecture: an optional sigmoid gate over the input features, a
    feature extractor built from the first (up to) three entries of
    ``hidden_dims``, and a classifier built from the remaining entries.

    Args:
        input_dim: number of input features.
        output_dim: number of output logits.
        hidden_dims: hidden-layer widths; the first three form the feature
            extractor, the rest form the classifier.
        dropout_rate: dropout probability for the extractor layers (the last
            extractor layer uses 0.8x this rate).
        attention: if True, scale each input feature by a learned (0, 1) weight.
    """

    def __init__(self, input_dim: int, output_dim: int, hidden_dims: List[int],
                 dropout_rate: float = 0.25, attention: bool = True):
        super().__init__()
        self.attention = attention

        # Per-feature sigmoid gate: produces weights in (0, 1) per feature.
        if self.attention:
            self.feature_attention = nn.Sequential(
                nn.Linear(input_dim, input_dim),
                nn.Sigmoid()
            )

        # Feature extractor: first (up to) three hidden layers.
        feature_dims = hidden_dims[:3]
        feature_layers = []
        prev_dim = input_dim
        for i, dim in enumerate(feature_dims):
            # Reduce dropout on the LAST extractor layer. Compare by position,
            # not value: the previous value-based check (`dim != hidden_dims[2]`)
            # raised IndexError with fewer than 3 hidden dims and mis-fired
            # when hidden widths repeated.
            is_last = i == len(feature_dims) - 1
            feature_layers.extend([
                nn.Linear(prev_dim, dim),
                nn.BatchNorm1d(dim),
                nn.SiLU(),
                nn.Dropout(dropout_rate * 0.8 if is_last else dropout_rate)
            ])
            prev_dim = dim

        self.feature_extractor = nn.Sequential(*feature_layers)

        # Classifier: remaining hidden layers plus the output projection.
        classifier_layers = []
        for dim in hidden_dims[3:]:
            classifier_layers.extend([
                nn.Linear(prev_dim, dim),
                nn.BatchNorm1d(dim),
                nn.SiLU()
            ])
            prev_dim = dim

        # Output layer.
        classifier_layers.append(nn.Linear(prev_dim, output_dim))
        self.classifier = nn.Sequential(*classifier_layers)

        self._initialize_weights()

    def _initialize_weights(self) -> None:
        """Kaiming-initialize linear layers; reset batch-norm to identity."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return logits of shape (batch, output_dim) for input (batch, input_dim)."""
        if self.attention:
            attn_weights = self.feature_attention(x)
            x = x * attn_weights

        features = self.feature_extractor(x)
        logits = self.classifier(features)
        return logits


@ModelRegistry.register("price_transformer")
class PriceTransformer(nn.Module):
    """Transformer-based price prediction model.

    Each sample is treated as a length-1 sequence: inputs are projected to
    the encoder width, passed through a 2-layer transformer encoder, then
    classified by an MLP head.

    Args:
        input_dim: number of input features.
        output_dim: number of output logits.
        hidden_dims: ``hidden_dims[0]`` is the encoder width (must be
            divisible by the 4 attention heads), ``hidden_dims[1]`` the
            feed-forward width, ``hidden_dims[2:]`` the classifier widths.
        dropout_rate: dropout probability in the encoder and classifier.
        attention: accepted for signature compatibility with the sibling
            models; unused here (self-attention is always active).
    """

    def __init__(self, input_dim: int, output_dim: int, hidden_dims: List[int],
                 dropout_rate: float = 0.25, attention: bool = True):
        super().__init__()

        # Project raw features to the transformer model dimension.
        self.projection = nn.Linear(input_dim, hidden_dims[0])

        # 2-layer transformer encoder over (batch, seq, d_model) inputs.
        transformer_layer = nn.TransformerEncoderLayer(
            d_model=hidden_dims[0],
            nhead=4,
            dim_feedforward=hidden_dims[1],
            dropout=dropout_rate,
            batch_first=True
        )
        self.transformer = nn.TransformerEncoder(transformer_layer, num_layers=2)

        # Classification head over the encoded representation.
        classifier_layers = []
        prev_dim = hidden_dims[0]
        for dim in hidden_dims[2:]:
            classifier_layers.extend([
                nn.Linear(prev_dim, dim),
                nn.BatchNorm1d(dim),
                nn.SiLU(),
                nn.Dropout(dropout_rate)
            ])
            prev_dim = dim

        classifier_layers.append(nn.Linear(prev_dim, output_dim))
        self.classifier = nn.Sequential(*classifier_layers)

        self._initialize_weights()

    def _initialize_weights(self) -> None:
        """Initialize the projection and classifier weights only.

        Fix: the previous version iterated ``self.modules()`` and therefore
        Kaiming-reinitialized the Linear layers *inside* the transformer
        encoder, clobbering PyTorch's own transformer initialization. The
        encoder is now deliberately left untouched.
        """
        for submodule in (self.projection, self.classifier):
            for m in submodule.modules():
                if isinstance(m, nn.Linear):
                    nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm1d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return logits of shape (batch, output_dim) for input (batch, input_dim)."""
        # Add a singleton sequence dimension so the encoder accepts the batch.
        x = x.unsqueeze(1)  # (batch_size, 1, input_dim)

        # Project to the transformer dimension.
        x = self.projection(x)  # (batch_size, 1, hidden_dims[0])

        # Encode and drop the singleton sequence dimension.
        x = self.transformer(x)  # (batch_size, 1, hidden_dims[0])
        x = x.squeeze(1)  # (batch_size, hidden_dims[0])

        logits = self.classifier(x)
        return logits


@ModelRegistry.register("multi_task_model")
class MultiTaskModel(nn.Module):
    """Multi-task model: shared feature extractor with one head per task.

    Args:
        input_dim: number of input features.
        output_dims: mapping of task name -> number of logits for that task.
        hidden_dims: the first three entries form the shared extractor, the
            rest form each task-specific head.
        dropout_rate: dropout for the shared layers (heads use half of it).
        attention: if True, scale each input feature by a learned (0, 1) weight.
    """

    def __init__(self, input_dim: int, output_dims: Dict[str, int], hidden_dims: List[int],
                 dropout_rate: float = 0.25, attention: bool = True):
        super().__init__()
        self.attention = attention
        self.output_dims = output_dims

        # Per-feature sigmoid gate, same scheme as PriceMLP.
        if self.attention:
            self.feature_attention = nn.Sequential(
                nn.Linear(input_dim, input_dim),
                nn.Sigmoid()
            )

        # Shared feature extractor: first (up to) three hidden layers.
        shared_layers = []
        prev_dim = input_dim
        for dim in hidden_dims[:3]:
            shared_layers.extend([
                nn.Linear(prev_dim, dim),
                nn.BatchNorm1d(dim),
                nn.SiLU(),
                nn.Dropout(dropout_rate)
            ])
            prev_dim = dim

        self.shared_extractor = nn.Sequential(*shared_layers)

        # Task-specific heads: each head sees the shared features but owns
        # its weights.
        self.task_heads = nn.ModuleDict()
        for task, out_dim in output_dims.items():
            head_layers = []
            task_prev_dim = prev_dim
            for dim in hidden_dims[3:]:
                head_layers.extend([
                    nn.Linear(task_prev_dim, dim),
                    nn.BatchNorm1d(dim),
                    nn.SiLU(),
                    nn.Dropout(dropout_rate * 0.5)  # lighter dropout in heads
                ])
                task_prev_dim = dim
            head_layers.append(nn.Linear(task_prev_dim, out_dim))
            self.task_heads[task] = nn.Sequential(*head_layers)

        # Consistency fix: the sibling models (PriceMLP, PriceTransformer)
        # explicitly initialize their weights; this model previously skipped it.
        self._initialize_weights()

    def _initialize_weights(self) -> None:
        """Kaiming-initialize linear layers; reset batch-norm to identity."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Return a dict mapping each task name to its (batch, out_dim) logits."""
        if self.attention:
            attn_weights = self.feature_attention(x)
            x = x * attn_weights

        shared_features = self.shared_extractor(x)

        # One forward pass per task head over the shared representation.
        return {task: head(shared_features) for task, head in self.task_heads.items()}