import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
import os
from config import ROOT_DIR  # project root directory, defined in config.py
from torchvision import models


class AIModel(nn.Module):
    """Binary classifier built on an EfficientNet backbone.

    Produces one sigmoid probability per sample. If a local pretrained
    checkpoint exists under ROOT_DIR/weights, its feature-extraction weights
    are loaded; the pretrained classification head is discarded so the fresh
    1-unit head is trained from scratch.
    """

    def __init__(self, efficientnet_type="efficientnet-b0"):
        super(AIModel, self).__init__()
        # Build the backbone once — both the pretrained and the from-scratch
        # paths use the exact same architecture with a single-logit head.
        self.efficientnet = EfficientNet.from_name(
            efficientnet_type,
            num_classes=1,
            include_top=True
        )

        # NOTE(review): the checkpoint filename is fixed to b0 even when
        # efficientnet_type differs — confirm this is intentional.
        weights_path = os.path.join(ROOT_DIR, 'weights', 'efficientnet-b0-weights.pth')
        if os.path.exists(weights_path):
            print(f"使用本地预训练权重: {weights_path}")
            # map_location='cpu' so a GPU-saved checkpoint still loads on a
            # CPU-only machine; the model is moved to a device later by the caller.
            pretrained_state_dict = torch.load(weights_path, map_location='cpu')

            # Drop the pretrained classification head: its output size does
            # not match our 1-unit head.
            pretrained_state_dict.pop('_fc.weight', None)
            pretrained_state_dict.pop('_fc.bias', None)

            # strict=False because only the feature-extraction weights remain.
            self.efficientnet.load_state_dict(pretrained_state_dict, strict=False)
            print("成功加载预训练权重（特征提取部分）")
        else:
            print("未找到预训练权重文件，将从头训练模型")

        # Regularization and probability squashing applied to the single logit.
        self.dropout = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return per-sample probabilities with shape (N, 1)."""
        x = self.efficientnet(x)
        x = self.dropout(x)
        x = self.sigmoid(x)
        return x


class BaggingModel(nn.Module):
    """Ensemble wrapper that averages the predictions of several sub-models."""

    def __init__(self, models):
        super(BaggingModel, self).__init__()
        # ModuleList registers each member so its parameters are tracked.
        self.models = nn.ModuleList(models)

    def forward(self, x):
        # Run every member on the same input, stack along a new leading
        # (model) dimension, and average it away.
        predictions = torch.stack([member(x) for member in self.models], dim=0)
        return predictions.mean(dim=0)


class ExpertModel(nn.Module):
    """Expert model wrapping an interchangeable CNN backbone.

    The backbone's own classification layer is replaced by ``nn.Identity`` and
    a fresh head (dropout -> linear -> sigmoid) maps the pooled features to a
    single probability. Optionally tracks an exponential moving average (EMA)
    of all trainable parameters for more stable inference.
    """

    def __init__(self, backbone_type, pretrained=True):
        super(ExpertModel, self).__init__()
        self.backbone_type = backbone_type
        self.ema_enabled = False
        self.feature_dim = self._init_backbone(backbone_type, pretrained)

        # Classification head: single sigmoid unit on top of backbone features.
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(self.feature_dim, 1),
            nn.Sigmoid()
        )

    def _init_backbone(self, backbone_type, pretrained):
        """Create the backbone, strip its classifier, and return its feature dim.

        Raises:
            ValueError: if ``backbone_type`` is not a supported EfficientNet
                or ConvNeXt variant.
        """
        if 'efficientnet' in backbone_type:
            model = EfficientNet.from_pretrained(backbone_type) if pretrained else \
                   EfficientNet.from_name(backbone_type)
            feature_dim = model._fc.in_features
            self.backbone = model
            # Replace the original classification layer so the backbone's
            # forward() yields pooled features.
            self.backbone._fc = nn.Identity()
            return feature_dim
        elif 'convnext' in backbone_type:
            # torchvision ConvNeXt variants; feature dims are fixed per variant.
            if backbone_type == 'convnext_tiny':
                model = models.convnext_tiny(pretrained=pretrained)
                feature_dim = 768
            elif backbone_type == 'convnext_small':
                model = models.convnext_small(pretrained=pretrained)
                feature_dim = 768
            elif backbone_type == 'convnext_base':
                model = models.convnext_base(pretrained=pretrained)
                feature_dim = 1024
            else:
                # Previously an unknown convnext variant fell through with
                # `model` unbound, raising UnboundLocalError; fail clearly.
                raise ValueError(f"不支持的骨干网络类型: {backbone_type}")
            # Remove the original classification layer.
            model.classifier = nn.Identity()
            self.backbone = model
            return feature_dim
        else:
            raise ValueError(f"不支持的骨干网络类型: {backbone_type}")

    def forward(self, x):
        """Return (probability, backbone_features) for the input batch."""
        features = self.backbone(x)
        output = self.classifier(features)
        return output, features

    def enable_ema(self, decay=0.999):
        """Enable EMA tracking, snapshotting the current trainable parameters."""
        self.ema_enabled = True
        self.ema_decay = decay
        self.ema_weights = {}

        # Seed the EMA with the parameters' current values.
        for name, param in self.named_parameters():
            if param.requires_grad:
                self.ema_weights[name] = param.data.clone()

    def update_ema(self):
        """Blend current trainable parameters into the EMA (no-op if disabled)."""
        if not self.ema_enabled:
            return

        for name, param in self.named_parameters():
            if param.requires_grad:
                self.ema_weights[name] = self.ema_weights[name] * self.ema_decay + \
                                        param.data * (1.0 - self.ema_decay)

    def apply_ema(self):
        """Swap EMA weights in for inference; return the displaced parameters.

        Returns an empty dict when EMA is disabled (previously returned None,
        which would crash a subsequent restore_params call).
        """
        if not self.ema_enabled:
            return {}

        stored_params = {}
        for name, param in self.named_parameters():
            if param.requires_grad:
                stored_params[name] = param.data.clone()
                param.data.copy_(self.ema_weights[name])

        return stored_params

    def restore_params(self, stored_params):
        """Restore parameters previously returned by apply_ema (no-op on empty)."""
        if not stored_params:
            return
        for name, param in self.named_parameters():
            if param.requires_grad and name in stored_params:
                param.data.copy_(stored_params[name])


class MFF_MoE(nn.Module):
    """Multi-feature-fusion mixture-of-experts model.

    Averages the sigmoid outputs of several heterogeneous expert backbones;
    each expert keeps an EMA copy of its weights for optional EMA inference.
    """

    def __init__(self):
        super(MFF_MoE, self).__init__()

        # Heterogeneous expert pool: two EfficientNet scales plus a ConvNeXt.
        self.experts = nn.ModuleList([
            ExpertModel("efficientnet-b0"),
            ExpertModel("efficientnet-b2"),
            ExpertModel("convnext_tiny")
        ])

        # More experts can be appended here if resources allow, e.g.:
        # self.experts.append(ExpertModel("efficientnet-b4"))
        # self.experts.append(ExpertModel("convnext_small"))

        # Start EMA tracking for every expert.
        for expert in self.experts:
            expert.enable_ema()

    def forward(self, x, use_ema=False):
        saved = []
        if use_ema:
            # Temporarily swap each expert's live weights for its EMA copy,
            # remembering the originals so they can be put back afterwards.
            saved = [expert.apply_ema() for expert in self.experts]

        # Each expert returns (probability, features); only the probability
        # is needed for fusion.
        per_expert = [expert(x)[0] for expert in self.experts]

        if use_ema and saved:
            # Put the live weights back after EMA inference.
            for expert, params in zip(self.experts, saved):
                expert.restore_params(params)

        # Fuse by averaging the experts' probabilities into an (N, 1) output.
        return torch.mean(torch.cat(per_expert, dim=1), dim=1, keepdim=True)

    def update_ema(self):
        """Refresh every expert's EMA weights."""
        for expert in self.experts:
            expert.update_ema()
