import torch
import torch.nn as nn
import torchvision.models as models


class MyCNN(nn.Module):
    """VGG-style CNN classifier for 160x160 RGB images.

    Five stages of (two 3x3 convs + ReLU, then 2x2 max-pool) halve the
    spatial size from 160 down to 5, producing a 1024 * 5 * 5 = 25600
    feature vector that feeds a three-layer fully-connected head.

    Args:
        num_classes: number of output logits of the final linear layer.
        init_weights: when True, apply Kaiming-normal init to conv weights
            and N(0, 0.01) init to linear weights (biases zeroed).
    """

    def __init__(self, num_classes, init_weights=True):
        super().__init__()
        self.conv1_1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=(1, 1))
        self.conv1_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=(1, 1))
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2_1 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=(1, 1))
        self.conv2_2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=(1, 1))
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3_1 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=(1, 1))
        self.conv3_2 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=(1, 1))
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv4_1 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=(1, 1))
        self.conv4_2 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=(1, 1))
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5_1 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=(1, 1))
        self.conv5_2 = nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3, padding=(1, 1))
        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)

        # 25600 = 1024 channels * 5 * 5 spatial -> assumes 160x160 input.
        self.fc1 = nn.Linear(25600, 4096)
        self.dropout_fc1 = nn.Dropout(0.5)

        self.fc2 = nn.Linear(4096, 4096)
        self.dropout_fc2 = nn.Dropout(0.5)

        self.fc3 = nn.Linear(4096, num_classes)

        self.relu = nn.ReLU()

        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for convs, N(0, 0.01) for linears; biases zeroed.

        The original also handled BatchNorm2d, but this model defines no
        BatchNorm layers, so that dead branch was removed.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return (batch, num_classes) logits for a (batch, 3, 160, 160) input.

        BUG FIX: the original applied ReLU only after the second conv of
        each pair, so conv*_1 + conv*_2 composed into a single affine map;
        every conv is now followed by a ReLU, as in the standard VGG design.
        """
        x = self.relu(self.conv1_1(x))
        x = self.relu(self.conv1_2(x))
        x = self.pool1(x)
        x = self.relu(self.conv2_1(x))
        x = self.relu(self.conv2_2(x))
        x = self.pool2(x)
        x = self.relu(self.conv3_1(x))
        x = self.relu(self.conv3_2(x))
        x = self.pool3(x)
        x = self.relu(self.conv4_1(x))
        x = self.relu(self.conv4_2(x))
        x = self.pool4(x)
        x = self.relu(self.conv5_1(x))
        x = self.relu(self.conv5_2(x))
        x = self.pool5(x)

        # flatten(x, 1) keeps the batch dimension and raises on an
        # unexpected feature size, unlike view(-1, 25600) which would
        # silently re-batch the data.
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        x = self.dropout_fc1(x)
        x = self.relu(self.fc2(x))
        x = self.dropout_fc2(x)
        x = self.fc3(x)
        return x


def create_my_cnn(num_classes):
    """Factory: build an untrained MyCNN with `num_classes` output logits."""
    model = MyCNN(num_classes=num_classes)
    return model


def create_resnet50(num_classes, pretrained=True):
    """Build a ResNet-50 with its classification head resized to `num_classes`.

    When `pretrained` is True, ImageNet weights are loaded and the early
    stages (layer1-layer3) are frozen so only layer4 and the new head are
    fine-tuned. Note the stem (conv1/bn1) is intentionally left trainable,
    matching the original behavior.

    Args:
        num_classes: number of output classes for the replacement fc layer.
        pretrained: load ImageNet weights and freeze layer1-layer3.

    Returns:
        A torchvision ResNet-50 module with a fresh `fc` layer.
    """
    # torchvision >= 0.13 deprecates (and later removes) the `pretrained=`
    # argument; prefer the weights enum when this torchvision provides it.
    if hasattr(models, "ResNet50_Weights"):
        weights = models.ResNet50_Weights.DEFAULT if pretrained else None
        resnet50 = models.resnet50(weights=weights)
    else:
        resnet50 = models.resnet50(pretrained=pretrained)

    if pretrained:
        # Freeze the early stages; str.startswith accepts a tuple of prefixes.
        frozen_prefixes = ('layer1', 'layer2', 'layer3')
        for name, param in resnet50.named_parameters():
            if name.startswith(frozen_prefixes):
                param.requires_grad = False

    # Replace the head; a freshly constructed Linear is trainable by default.
    feature_size = resnet50.fc.in_features
    resnet50.fc = nn.Linear(feature_size, num_classes)
    return resnet50
