import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet50, ResNet50_Weights

class SpatialAttention(nn.Module):
    """Spatial attention module (CBAM-style).

    Builds a single-channel attention map from the channel-wise mean and
    max of the input, then rescales the input features by that map.

    Args:
        kernel_size: conv kernel for the attention map; must be 3 or 7.
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), "kernel size must be 3 or 7"

        # "same" padding (3 for k=7, 1 for k=3) keeps the attention map
        # at the input's spatial resolution
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Channel-wise statistics, each (N, 1, H, W)
        pooled_mean = x.mean(dim=1, keepdim=True)
        pooled_max = x.max(dim=1, keepdim=True).values

        # Stack into a 2-channel descriptor and collapse to one map
        descriptor = torch.cat((pooled_mean, pooled_max), dim=1)
        attention_map = self.sigmoid(self.conv(descriptor))

        # Gate the original features with the attention map
        return attention_map * x

class ResNet50Model(nn.Module):
    """Plain ResNet-50 classifier: ImageNet backbone plus a new linear head.

    Args:
        num_classes: output size of the replacement fc layer.
        pretrained: when True, load the IMAGENET1K_V2 weights.
    """

    def __init__(self, num_classes=10, pretrained=True):
        super(ResNet50Model, self).__init__()
        # Load ResNet-50, optionally with the latest pretrained weights
        if pretrained:
            self.model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
        else:
            self.model = resnet50(weights=None)

        # Replace the final fully-connected layer. Read the input width from
        # the existing head instead of hard-coding 2048, consistent with how
        # the other model classes in this file obtain it.
        in_features = self.model.fc.in_features
        self.model.fc = nn.Linear(in_features, num_classes)

        # Xavier-initialize the new head; zero its bias
        nn.init.xavier_uniform_(self.model.fc.weight)
        if self.model.fc.bias is not None:
            nn.init.constant_(self.model.fc.bias, 0)

    def forward(self, x):
        """Full ResNet-50 forward pass; returns (N, num_classes) logits."""
        return self.model(x)

class ResNet50WithAttention(nn.Module):
    """ResNet-50 backbone with spatial attention applied to the last
    feature map before a custom pooling + linear classifier head.

    Args:
        num_classes: output size of the classifier.
        pretrained: when True, load the IMAGENET1K_V2 weights.
    """

    def __init__(self, num_classes=10, pretrained=True):
        super(ResNet50WithAttention, self).__init__()
        # Backbone, optionally initialized with ImageNet weights
        if pretrained:
            self.resnet = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
        else:
            self.resnet = resnet50(weights=None)

        # Drop the stock head (keep the backbone as a feature extractor),
        # remembering its input width for our own classifier
        in_features = self.resnet.fc.in_features
        self.resnet.fc = nn.Identity()

        # Spatial attention over the layer4 feature map
        self.attention = SpatialAttention(kernel_size=7)

        # Pool -> flatten -> linear classifier
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(in_features, num_classes)
        )

        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier init (zero bias) for every linear layer in the new head
        for module in self.classifier.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Stem: conv -> bn -> relu -> maxpool
        out = self.resnet.conv1(x)
        out = self.resnet.maxpool(self.resnet.relu(self.resnet.bn1(out)))

        # Residual stages
        for stage in (self.resnet.layer1, self.resnet.layer2,
                      self.resnet.layer3, self.resnet.layer4):
            out = stage(out)

        # Attention-gate the final feature map, then classify
        return self.classifier(self.attention(out))

class AdvancedResNet50(nn.Module):
    """
    Advanced ResNet-50 variant with several training tweaks:
    1. Freezes most backbone parameters so only the tail is fine-tuned.
    2. Adds Dropout layers to reduce overfitting.
    3. Uses a two-layer classifier head for stronger feature extraction.
    4. Uses Xavier initialization on the new layers for better convergence.
    """

    def __init__(self, num_classes=10, dropout_rate=0.2, pretrained=True):
        super(AdvancedResNet50, self).__init__()
        # Backbone, optionally with the latest ImageNet weights
        if pretrained:
            self.model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
        else:
            self.model = resnet50(weights=None)

        # Freeze all but the last 30 parameter tensors of the original
        # network; only the tail remains trainable.
        backbone_params = list(self.model.parameters())
        for frozen in backbone_params[:-30]:
            frozen.requires_grad = False

        # Swap in a two-layer classifier with dropout. The new layers are
        # created after the freeze above, so they stay trainable.
        in_features = self.model.fc.in_features
        self.model.fc = nn.Sequential(
            nn.Dropout(dropout_rate),
            nn.Linear(in_features, 512),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(512, num_classes)
        )

        # Xavier init (zero bias) for each linear layer in the new head
        for layer in self.model.fc.modules():
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """Full forward pass through the (partially frozen) backbone and head."""
        return self.model(x)

# Focal Loss implementation, used to handle class imbalance
class FocalLoss(nn.Module):
    """Focal loss for class-imbalanced classification.

    Down-weights well-classified samples by (1 - p_t)^gamma so training
    concentrates on hard examples.

    Args:
        alpha: global scaling factor applied to every sample's loss.
        gamma: focusing exponent; gamma = 0 reduces to alpha-scaled CE.
        reduction: 'mean', 'sum', or anything else for per-sample losses.
    """

    def __init__(self, alpha=1, gamma=2, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        # Per-sample CE so the focal modulation can be applied element-wise
        self.ce_loss = nn.CrossEntropyLoss(reduction='none')

    def forward(self, inputs, targets):
        per_sample_ce = self.ce_loss(inputs, targets)
        # p_t: the model's probability for the true class, recovered from CE
        prob_true = torch.exp(-per_sample_ce)
        modulated = self.alpha * (1 - prob_true) ** self.gamma * per_sample_ce

        if self.reduction == 'sum':
            return modulated.sum()
        if self.reduction == 'mean':
            return modulated.mean()
        return modulated

def get_model(num_classes=10, pretrained=True, use_attention=False, use_advanced=False):
    """
    Build a ResNet-50 model instance.

    Args:
        num_classes: number of classification classes
        pretrained: whether to load pretrained weights
        use_attention: whether to use the attention variant (takes priority)
        use_advanced: whether to use the advanced ResNet-50 variant

    Returns:
        model: the model instance
    """
    if use_attention:
        model_cls = ResNet50WithAttention
    elif use_advanced:
        model_cls = AdvancedResNet50
    else:
        model_cls = ResNet50Model
    return model_cls(num_classes=num_classes, pretrained=pretrained)