from torchvision import models
import torch.nn as nn
from utils.coordconv import CoordConv
import torch.nn.functional as F
import torch
class Resnet50(nn.Module):
    """ResNet-50 backbone adapted to 43-channel input with a 2-class head.

    Note: `pretrained` loads ImageNet weights for the torchvision backbone;
    the replaced stem conv and final linear layer are freshly initialised.
    """

    def __init__(self, pretrained=True):
        super(Resnet50, self).__init__()
        backbone = models.resnet50(pretrained=pretrained)
        # Swap the stem so the network accepts 43 input channels instead of RGB.
        backbone.conv1 = nn.Conv2d(43, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        # Replace the classifier head with a fresh 2-way linear layer.
        backbone.fc = nn.Linear(backbone.fc.in_features, 2)
        self.model = backbone

class Coord_Resnet50(nn.Module):
    """ResNet-50 variant whose stem conv is a CoordConv layer.

    NOTE(review): CoordConv presumably injects coordinate channels before
    convolving (`with_r` adding a radius channel) — confirm against
    utils.coordconv.
    """

    def __init__(self, with_r=False, pretrained=True):
        super(Coord_Resnet50, self).__init__()
        backbone = models.resnet50(pretrained=pretrained)
        # CoordConv stem accepting 43 input channels.
        backbone.conv1 = CoordConv(43, 64, with_r=with_r, kernel_size=7, stride=2, padding=3, bias=False)
        # Fresh 2-way classification head.
        backbone.fc = nn.Linear(backbone.fc.in_features, 2)
        self.model = backbone


class VGG16(nn.Module):
    """VGG-16 adapted to 43-channel input with a 2-class head."""

    def __init__(self, pretrained=True):
        super(VGG16, self).__init__()
        net = models.vgg16(pretrained=pretrained)
        # First conv takes 43 channels instead of RGB.
        net.features[0] = nn.Conv2d(43, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        # Re-initialise the last classifier layer for 2 classes.
        net.classifier[6] = nn.Linear(net.classifier[6].in_features, 2)
        self.model = net
        
class Coord_VGG16(nn.Module):
    """VGG-16 variant whose first conv is a CoordConv layer (43 channels in)."""

    def __init__(self, pretrained=True):
        super(Coord_VGG16, self).__init__()
        net = models.vgg16(pretrained=pretrained)
        # CoordConv front layer for 43-channel input.
        net.features[0] = CoordConv(43, 64, kernel_size=3, stride=1, padding=1)
        # Re-initialise the last classifier layer for 2 classes.
        net.classifier[6] = nn.Linear(net.classifier[6].in_features, 2)
        self.model = net


class AlexNet(nn.Module):
    """AlexNet-style CNN for 43-channel inputs.

    Note: `pretrained` is accepted for interface parity with the other
    wrappers but is not used — weights are always freshly initialised.
    """

    def __init__(self, num_classes=2, pretrained=True):
        super().__init__()

        conv_stack = [
            # Stage 1: shallow conv with aggressive downsampling.
            nn.Conv2d(43, 64, kernel_size=7, stride=2, padding=3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 2: mid-level features.
            nn.Conv2d(64, 192, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 3: deep features.
            nn.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 4: transition conv (no further max pooling).
            nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # Adaptive pooling guarantees a fixed 4x4 spatial size.
            nn.AdaptiveAvgPool2d(output_size=(4, 4)),
        ]
        self.features = nn.Sequential(*conv_stack)

        head = [
            nn.Dropout(p=0.5),
            nn.Linear(256 * 4 * 4, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        """Run the conv stack, flatten per sample, and classify."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

class Coord_AlexNet(nn.Module):
    """AlexNet-style CNN whose first layer is a CoordConv (43 channels in).

    Note: `pretrained` is accepted for interface parity with the other
    wrappers but is not used — weights are always freshly initialised.
    """

    def __init__(self, num_classes=2, pretrained=True):
        super().__init__()

        conv_stack = [
            # Stage 1: CoordConv stem with aggressive downsampling.
            CoordConv(43, 64, kernel_size=7, stride=2, padding=3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 2: mid-level features.
            nn.Conv2d(64, 192, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 3: deep features.
            nn.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 4: transition conv (no further max pooling).
            nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            # Adaptive pooling guarantees a fixed 4x4 spatial size.
            nn.AdaptiveAvgPool2d(output_size=(4, 4)),
        ]
        self.features = nn.Sequential(*conv_stack)

        head = [
            nn.Dropout(p=0.5),
            nn.Linear(256 * 4 * 4, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        """Run the conv stack, flatten per sample, and classify."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)


class LeNet(nn.Module):
    """Small LeNet-style CNN for arbitrary-channel inputs.

    Args:
        input_channels: number of channels in the input tensor.
        num_classes: size of the output logit vector.
        pretrained: unused; kept for interface parity with the other wrappers.
    """

    def __init__(self, input_channels=43, num_classes=2, pretrained=True):
        super().__init__()

        self.features = nn.Sequential(
            # Conv block 1
            nn.Conv2d(input_channels, 32, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Conv block 2
            nn.Conv2d(32, 64, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Conv block 3
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Adaptive pooling guarantees a fixed 4x4 spatial size.
            nn.AdaptiveAvgPool2d(output_size=(4, 4))
        )

        # Probe the feature extractor ONCE to get the flattened dimension.
        # (The original ran three separate forward passes just to read the
        # three sizes of the same output tensor.)  no_grad avoids building
        # an autograd graph during construction.
        with torch.no_grad():
            probe = self.features(torch.randn(1, input_channels, 50, 50))
        flattened_dim = probe[0].numel()

        self.classifier = nn.Sequential(
            nn.Linear(flattened_dim, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(1024, num_classes)
        )

    def forward(self, x):
        """Extract features, flatten per sample, and classify."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)

class Coord_LeNet(nn.Module):
    """LeNet-style CNN whose first layer is a CoordConv.

    Args:
        input_channels: number of channels in the input tensor.
        num_classes: size of the output logit vector.
        pretrained: unused; kept for interface parity with the other wrappers.
    """

    def __init__(self, input_channels=43, num_classes=2, pretrained=True):
        super().__init__()

        self.features = nn.Sequential(
            # Conv block 1 (CoordConv stem)
            CoordConv(input_channels, 32, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Conv block 2
            nn.Conv2d(32, 64, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Conv block 3
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Adaptive pooling guarantees a fixed 4x4 spatial size.
            nn.AdaptiveAvgPool2d(output_size=(4, 4))
        )

        # Probe the feature extractor ONCE to get the flattened dimension.
        # (The original ran three separate forward passes just to read the
        # three sizes of the same output tensor.)  no_grad avoids building
        # an autograd graph during construction.
        with torch.no_grad():
            probe = self.features(torch.randn(1, input_channels, 50, 50))
        flattened_dim = probe[0].numel()

        self.classifier = nn.Sequential(
            nn.Linear(flattened_dim, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(1024, num_classes)
        )

    def forward(self, x):
        """Extract features, flatten per sample, and classify."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)


def MyModel(model_name, pretrained=True):
    """Factory returning a ready-to-use network for `model_name`.

    The torchvision-based wrappers ('resnet50', 'vgg16' and their coord
    variants) yield the inner `.model` attribute; the custom architectures
    are returned as-is. Unknown names print a message and return None.
    """
    # Lazy builders: nothing is constructed until the name is matched.
    builders = {
        'resnet50': lambda: Resnet50(pretrained=pretrained).model,
        'vgg16': lambda: VGG16(pretrained=pretrained).model,
        'alexnet': lambda: AlexNet(num_classes=2, pretrained=pretrained),
        'lenet': lambda: LeNet(input_channels=43, num_classes=2, pretrained=pretrained),
        'coord_resnet50': lambda: Coord_Resnet50(pretrained=pretrained).model,
        'coord_vgg16': lambda: Coord_VGG16(pretrained=pretrained).model,
        'coord_alexnet': lambda: Coord_AlexNet(num_classes=2, pretrained=pretrained),
        'coord_lenet': lambda: Coord_LeNet(input_channels=43, num_classes=2, pretrained=pretrained),
    }
    build = builders.get(model_name)
    if build is None:
        print("模型不存在")
        return None
    return build()



