import torch
import torch.nn as nn
import torch.nn.functional as F

from torchvision.models import (
    vgg11, vgg13, vgg16, vgg19,
    resnet18, resnet34, resnet50, resnet101, resnet152,
    densenet121, densenet169, densenet201, densenet161,
    efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3,
    efficientnet_b4, efficientnet_b5, efficientnet_b6, efficientnet_b7
)
from transformers import ViTForImageClassification, SwinForImageClassification
from transformers import ViTModel, ViTConfig, SwinModel, SwinConfig
from transformers import AutoImageProcessor, AutoModelForImageClassification

class VGG_Backbone(nn.Module):
    """Feature-extraction backbone built from a torchvision VGG.

    The classification head (avgpool + classifier) is replaced with
    identities and only the convolutional trunk (``features``) is used,
    so ``forward`` returns the raw conv feature map.
    """

    # Version-string -> constructor dispatch; clearer than an if/elif chain.
    _BUILDERS = {
        'vgg11': vgg11,
        'vgg13': vgg13,
        'vgg16': vgg16,
        'vgg19': vgg19,
    }

    def __init__(self, version='vgg16', pretrained=False):
        """
        Args:
            version: one of 'vgg11', 'vgg13', 'vgg16', 'vgg19'.
            pretrained: load ImageNet weights. NOTE(review): the
                ``pretrained=`` argument is deprecated in torchvision >= 0.13
                in favor of ``weights=``; kept here for compatibility with
                the rest of this file.

        Raises:
            ValueError: if ``version`` is not a supported VGG variant.
        """
        super().__init__()

        builder = self._BUILDERS.get(version)
        if builder is None:
            raise ValueError(f"Unsupported VGG version: {version}")
        self.model = builder(pretrained=pretrained)

        # Drop VGG's avgpool and classifier layers; only the conv trunk is kept.
        self.model.avgpool = nn.Identity()
        self.model.classifier = nn.Identity()
        self.features = self.model.features

    def forward(self, x):
        """Return the convolutional feature map for images x of shape (B, C, H, W)."""
        return self.features(x)


class ResNet_Backbone(nn.Module):
    """Feature-extraction backbone built from a torchvision ResNet.

    The global average pool and fully-connected head are replaced with
    identities; ``forward`` runs the stem and the four residual stages and
    returns the stage-4 feature map.
    """

    # Version-string -> constructor dispatch; clearer than an if/elif chain.
    _BUILDERS = {
        'resnet18': resnet18,
        'resnet34': resnet34,
        'resnet50': resnet50,
        'resnet101': resnet101,
        'resnet152': resnet152,
    }

    def __init__(self, version='resnet18', pretrained=False):
        """
        Args:
            version: one of 'resnet18', 'resnet34', 'resnet50',
                'resnet101', 'resnet152'.
            pretrained: load ImageNet weights. NOTE(review): ``pretrained=``
                is deprecated in torchvision >= 0.13 in favor of ``weights=``;
                kept for compatibility with the rest of this file.

        Raises:
            ValueError: if ``version`` is not a supported ResNet variant.
        """
        super().__init__()

        builder = self._BUILDERS.get(version)
        if builder is None:
            raise ValueError(f"Unsupported ResNet version: {version}")
        self.model = builder(pretrained=pretrained)

        # Remove the global average pool and fully-connected classifier.
        self.model.avgpool = nn.Identity()
        self.model.fc = nn.Identity()

        # Chain the stem and residual stages into a single feature extractor.
        self.features = nn.Sequential(
            self.model.conv1,
            self.model.bn1,
            self.model.relu,
            self.model.maxpool,
            self.model.layer1,
            self.model.layer2,
            self.model.layer3,
            self.model.layer4
        )

    def forward(self, x):
        """Return the stage-4 feature map for images x of shape (B, C, H, W)."""
        return self.features(x)


class DenseNet_Backbone(nn.Module):
    """Feature-extraction backbone built from a torchvision DenseNet.

    Only ``model.features`` is used; the classifier is replaced with an
    identity. Note that torchvision's DenseNet has no ``avgpool`` submodule
    (pooling is applied functionally inside its own ``forward``), so the
    original ``self.model.avgpool = nn.Identity()`` assignment was a no-op
    that merely attached an unused module — it has been removed.
    """

    # Version-string -> constructor dispatch; clearer than an if/elif chain.
    _BUILDERS = {
        'densenet121': densenet121,
        'densenet169': densenet169,
        'densenet201': densenet201,
        'densenet161': densenet161,
    }

    def __init__(self, version='densenet121', pretrained=False):
        """
        Args:
            version: one of 'densenet121', 'densenet169', 'densenet201',
                'densenet161'.
            pretrained: load ImageNet weights. NOTE(review): ``pretrained=``
                is deprecated in torchvision >= 0.13 in favor of ``weights=``;
                kept for compatibility with the rest of this file.

        Raises:
            ValueError: if ``version`` is not a supported DenseNet variant.
        """
        super().__init__()

        builder = self._BUILDERS.get(version)
        if builder is None:
            raise ValueError(f"Unsupported DenseNet version: {version}")
        self.model = builder(pretrained=pretrained)

        # Drop the classification head; only the dense-block trunk is kept.
        self.model.classifier = nn.Identity()
        self.features = self.model.features

    def forward(self, x):
        """Return the dense-block feature map for images x of shape (B, C, H, W)."""
        return self.features(x)


class EfficientNet_Backbone(nn.Module):
    """Feature-extraction backbone built from a torchvision EfficientNet.

    The global average pool and classifier are replaced with identities and
    only the convolutional trunk (``features``) is used.
    """

    # Version-string -> constructor dispatch; clearer than an if/elif chain.
    _BUILDERS = {
        'efficientnet_b0': efficientnet_b0,
        'efficientnet_b1': efficientnet_b1,
        'efficientnet_b2': efficientnet_b2,
        'efficientnet_b3': efficientnet_b3,
        'efficientnet_b4': efficientnet_b4,
        'efficientnet_b5': efficientnet_b5,
        'efficientnet_b6': efficientnet_b6,
        'efficientnet_b7': efficientnet_b7,
    }

    def __init__(self, version='efficientnet_b0', pretrained=False):
        """
        Args:
            version: 'efficientnet_b0' through 'efficientnet_b7'.
            pretrained: load ImageNet weights. NOTE(review): ``pretrained=``
                is deprecated in torchvision >= 0.13 in favor of ``weights=``;
                kept for compatibility with the rest of this file.

        Raises:
            ValueError: if ``version`` is not a supported EfficientNet variant.
        """
        super().__init__()

        builder = self._BUILDERS.get(version)
        if builder is None:
            raise ValueError(f"Unsupported EfficientNet version: {version}")
        self.model = builder(pretrained=pretrained)

        # Drop the global average pool and classifier; keep the conv trunk.
        self.model.avgpool = nn.Identity()
        self.model.classifier = nn.Identity()
        self.features = self.model.features

    def forward(self, x):
        """Return the convolutional feature map for images x of shape (B, C, H, W)."""
        return self.features(x)


class ViT_Backbone(nn.Module):
    """ViT feature extractor returning the encoder's last hidden state.

    Fixes two bugs in the original implementation:
    * ``from_pretrained(None)`` (the ``pretrained=False`` path) raises —
      a randomly initialized model must be built from a config instead.
    * ``ViTForImageClassification`` outputs have ``logits`` but no
      ``last_hidden_state``, so the original ``forward`` raised
      ``AttributeError``; the bare ``ViTModel`` encoder is used instead,
      whose output does expose ``last_hidden_state``.
    """

    def __init__(self, version='vit_base_patch16_224', pretrained=False):
        """
        Args:
            version: timm-style name such as 'vit_base_patch16_224'. For
                pretrained loading it is mapped to the Hugging Face hub id
                by swapping underscores for hyphens
                (e.g. 'google/vit-base-patch16-224').
            pretrained: when True, download weights from the HF hub; when
                False, build a randomly initialized ViT from the default
                config.
        """
        super(ViT_Backbone, self).__init__()

        if pretrained:
            # HF hub ids use hyphens, not underscores.
            hub_id = f"google/{version.replace('_', '-')}"
            self.model = ViTModel.from_pretrained(hub_id)
        else:
            # Random initialization with the default ViT-Base config.
            self.model = ViTModel(ViTConfig())

    def forward(self, x):
        """Encode pixel values x of shape (B, C, H, W).

        Returns:
            Token embeddings of shape (B, num_patches + 1, hidden_dim).
        """
        outputs = self.model(x)
        return outputs.last_hidden_state


class SwinTransformer_Backbone(nn.Module):
    """Swin Transformer feature extractor returning the last hidden state.

    Fixes two bugs in the original implementation:
    * ``from_pretrained(None)`` (the ``pretrained=False`` path) raises —
      a randomly initialized model must be built from a config instead.
    * ``SwinForImageClassification`` outputs have ``logits`` but no
      ``last_hidden_state``, so the original ``forward`` raised
      ``AttributeError``; the bare ``SwinModel`` encoder is used instead,
      whose output does expose ``last_hidden_state``.
    """

    def __init__(self, version='swin_tiny_patch4_window7_224', pretrained=False):
        """
        Args:
            version: timm-style name such as 'swin_tiny_patch4_window7_224'.
                For pretrained loading it is mapped to the Hugging Face hub
                id by swapping underscores for hyphens
                (e.g. 'microsoft/swin-tiny-patch4-window7-224').
            pretrained: when True, download weights from the HF hub; when
                False, build a randomly initialized Swin from the default
                config.
        """
        super(SwinTransformer_Backbone, self).__init__()

        if pretrained:
            # HF hub ids use hyphens, not underscores.
            hub_id = f"microsoft/{version.replace('_', '-')}"
            self.model = SwinModel.from_pretrained(hub_id)
        else:
            # Random initialization with the default Swin-Tiny config.
            self.model = SwinModel(SwinConfig())

    def forward(self, x):
        """Encode pixel values x of shape (B, C, H, W).

        Returns:
            Token embeddings of shape (B, sequence_length, hidden_dim).
        """
        outputs = self.model(x)
        return outputs.last_hidden_state
    

class UNet_Backbone(nn.Module):
    """Lightweight U-Net-style encoder/decoder backbone with additive skips.

    Four downsampling conv blocks (each halving H and W via max-pooling)
    are followed by three transposed-conv upsampling blocks. Skip tensors
    are added (not concatenated) to the decoder outputs; when spatial sizes
    disagree (non-power-of-two inputs), the skip is bilinearly resized to
    match the decoder output first.

    The original code compared spatial sizes with
    ``torch.equal(torch.tensor(a), torch.tensor(b))`` — allocating two
    tensors per check; a plain ``!=`` on the shape tuples is equivalent.
    """

    def __init__(self, in_channels=3, out_channels=128, pretrained=False):
        """
        Args:
            in_channels: number of input image channels.
            out_channels: number of channels of the output feature map.
            pretrained: accepted for interface parity with the other
                backbones; no pretrained weights exist for this model, so
                the flag is ignored.
        """
        super(UNet_Backbone, self).__init__()

        # Encoder: each block halves the spatial resolution.
        self.encoder1 = self._conv_block(in_channels, 64)
        self.encoder2 = self._conv_block(64, 128)
        self.encoder3 = self._conv_block(128, 256)
        self.encoder4 = self._conv_block(256, 512)

        # Decoder: each block doubles the spatial resolution.
        self.decoder1 = self._upconv_block(512, 256)
        self.decoder2 = self._upconv_block(256, 128)
        self.decoder3 = self._upconv_block(128, 64)

        # 1x1 projection to the requested number of output channels.
        self.final_conv = nn.Conv2d(64, out_channels, kernel_size=1)

    def _conv_block(self, in_channels, out_channels):
        """Two 3x3 conv+BN+ReLU layers followed by a 2x2 max-pool (H, W -> H/2, W/2)."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

    def _upconv_block(self, in_channels, out_channels):
        """2x2 transposed conv (H, W -> 2H, 2W) followed by a 3x3 conv+BN+ReLU."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    @staticmethod
    def _add_skip(decoded, skip):
        """Additive skip connection; bilinearly resizes `skip` if spatial sizes differ."""
        if decoded.shape[2:] != skip.shape[2:]:
            skip = F.interpolate(skip, size=decoded.shape[2:],
                                 mode='bilinear', align_corners=False)
        return decoded + skip

    def forward(self, x):
        """Run the encoder/decoder; returns (B, out_channels, H/2, W/2) for
        power-of-two inputs (smaller for inputs that don't divide evenly)."""
        # Encoder path: each stage halves H and W.
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)

        # Decoder path with additive skip connections.
        d1 = self._add_skip(self.decoder1(e4), e3)
        d2 = self._add_skip(self.decoder2(d1), e2)
        d3 = self._add_skip(self.decoder3(d2), e1)

        return self.final_conv(d3)