﻿import torch
import torch.nn as nn
import torch.nn.functional as F

from torchsummary import summary


'''
VGG-11: 11 weight layers (8 convolutional + 3 fully connected).
VGG-13: 13 weight layers (10 convolutional + 3 fully connected).
VGG-16: 16 weight layers (13 convolutional + 3 fully connected). One of the most widely used VGG variants.
VGG-19: 19 weight layers (16 convolutional + 3 fully connected).

All variants share the same five-block convolutional backbone structure.
'''


class VGG16(nn.Module):
    """
    VGG16 architecture (13 convolutional layers + 3 fully connected layers).

    The convolutional backbone is sized so that a 32x32 input (CIFAR-style)
    is reduced to a 512 x 1 x 1 feature map after the five max-pool stages.
    An adaptive average pool in front of the classifier makes the network
    accept larger inputs as well (anything >= 32x32, e.g. 224x224); for
    32x32 input the pool is an exact identity, so behavior is unchanged.
    """

    def __init__(self, num_classes=10):
        """
        Define and allocate layers for this neural net.

        Args:
            num_classes (int): number of classes to predict with this model
        """
        super().__init__()

        # Five convolutional blocks; shape comments assume a 32x32 input.
        self.features = nn.Sequential(
            # Block 1
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),  # (b x 64 x 32 x 32)  32-3+1*2+1=32
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),  # (b x 64 x 32 x 32)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 64 x 16 x 16)

            # Block 2
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),  # (b x 128 x 16 x 16)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),  # (b x 128 x 16 x 16)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 128 x 8 x 8)

            # Block 3
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),  # (b x 256 x 8 x 8)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),  # (b x 256 x 8 x 8)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),  # (b x 256 x 8 x 8)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 256 x 4 x 4)

            # Block 4
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),  # (b x 512 x 4 x 4)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),  # (b x 512 x 4 x 4)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),  # (b x 512 x 4 x 4)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 512 x 2 x 2)

            # Block 5
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),  # (b x 512 x 2 x 2)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),  # (b x 512 x 2 x 2)
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),  # (b x 512 x 2 x 2)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)  # (b x 512 x 1 x 1)
        )

        # Reduce any spatial size down to 1x1 so the classifier's fixed
        # 512-feature input works for inputs larger than 32x32 too.
        # Parameter-free, so existing checkpoints still load unchanged.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        # Classifier layers
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(in_features=512 * 1 * 1, out_features=4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=4096, out_features=num_classes),
        )

    def forward(self, x):
        """
        Pass the input through the net.

        Args:
            x (Tensor): input batch of shape (b, 3, H, W), H and W >= 32

        Returns:
            Tensor: class logits of shape (b, num_classes)
        """
        x = self.features(x)
        x = self.avgpool(x)            # identity for 32x32 input; pools larger maps to 1x1
        x = torch.flatten(x, 1)        # flatten for linear layers
        return self.classifier(x)


class VGG11(nn.Module):
    """
    VGG11 architecture (8 convolutional layers + 3 fully connected layers).

    The backbone reduces a 32x32 input (CIFAR-style) to a 512 x 1 x 1
    feature map across five max-pool stages. An adaptive average pool in
    front of the classifier additionally allows any input >= 32x32; for a
    32x32 input the pool is an exact identity, so behavior is unchanged.
    """

    def __init__(self, num_classes=10):
        """
        Define and allocate layers for this neural net.

        Args:
            num_classes (int): number of classes to predict with this model
        """
        super().__init__()

        # Five convolutional blocks; shape comments assume a 32x32 input.
        self.features = nn.Sequential(
            # Block 1
            nn.Conv2d(3, 64, kernel_size=3, padding=1),  # (b x 64 x 32 x 32)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 64 x 16 x 16)

            # Block 2
            nn.Conv2d(64, 128, kernel_size=3, padding=1),  # (b x 128 x 16 x 16)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 128 x 8 x 8)

            # Block 3
            nn.Conv2d(128, 256, kernel_size=3, padding=1),  # (b x 256 x 8 x 8)
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),  # (b x 256 x 8 x 8)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 256 x 4 x 4)

            # Block 4
            nn.Conv2d(256, 512, kernel_size=3, padding=1),  # (b x 512 x 4 x 4)
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),  # (b x 512 x 4 x 4)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # (b x 512 x 2 x 2)

            # Block 5
            nn.Conv2d(512, 512, kernel_size=3, padding=1),  # (b x 512 x 2 x 2)
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),  # (b x 512 x 2 x 2)
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)  # (b x 512 x 1 x 1)
        )

        # Reduce any spatial size down to 1x1 so the classifier's fixed
        # 512-feature input works for inputs larger than 32x32 too.
        # Parameter-free, so existing checkpoints still load unchanged.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        # Classifier layers (narrower 512-unit hidden layers than VGG16's 4096)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(512 * 1 * 1, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(512, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """
        Pass the input through the net.

        Args:
            x (Tensor): input batch of shape (b, 3, H, W), H and W >= 32

        Returns:
            Tensor: class logits of shape (b, num_classes)
        """
        x = self.features(x)
        x = self.avgpool(x)            # identity for 32x32 input; pools larger maps to 1x1
        x = torch.flatten(x, 1)        # flatten for linear layers
        return self.classifier(x)



def get_VGG16():
    """Build and return a VGG16 model with default settings."""
    return VGG16()

def get_VGG11():
    """Build and return a VGG11 model with default settings."""
    return VGG11()


if __name__ == '__main__':
    # Quick smoke test: build the model, print its structure, and show a
    # per-layer summary of the convolutional backbone.
    model = get_VGG16()
    print(model)
    # Fall back to CPU when CUDA is unavailable instead of crashing on .to('cuda').
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    features = model.features.to(device)
    # torchsummary defaults to device="cuda", so pass the device explicitly.
    # summary(features, (3, 32, 32), device=device)
    summary(features, (3, 224, 224), device=device)