# -*- coding: utf-8 -*-
"""
@author: YuHaiyang

"""

import torch.nn
from torch import nn

"""

相对于 AlexNet 优势：
1. 更深的网络结构
VGG16 比 AlexNet 更深，它包括16个卷积和全连接层，相对较小的卷积核尺寸（3x3），以及更深的网络结构有助于捕获更复杂的特征，提高了性能。
2. 简单且统一的架构
VGG16 采用了非常简单和统一的卷积层架构，所有卷积层都使用了相同大小的卷积核，这种设计的简洁性使得它更易于理解和修改。
3. 预训练模型
VGG16 在大规模图像数据集上进行了训练，因此它的权重可以用作预训练模型，然后微调用于特定任务，这对于迁移学习和快速开发视觉任务非常有用。

PyTorch
https://pytorch.org/hub/pytorch_vision_vgg/
"""


class Vgg16(nn.Module):
    """VGG16 (Simonyan & Zisserman, configuration D).

    13 convolutional layers (all 3x3, stride 1, 'same' padding), each followed
    by ReLU, grouped into 5 blocks separated by 2x2 max-pooling, then a
    3-layer fully-connected classifier.

    Bug fixed vs. the original version: the feature extractor had no ReLU
    activations between the convolutions, so stacked convs collapsed into a
    single linear map per receptive field. VGG applies ReLU after every conv.

    An AdaptiveAvgPool2d((7, 7)) is inserted before the classifier; for the
    canonical 224x224 input the feature map is already 7x7 so the pool is an
    identity, while other input sizes (e.g. 32x32, as noted in the original
    comments) are resized to match the fixed 7*7*512 classifier input.

    Args:
        num_classes: number of output logits (default 10).
        dropout: dropout probability used in the classifier (default 0.5).
    """

    __version__ = "1.0.0"

    def __init__(self, num_classes: int = 10, dropout: float = 0.5):
        super().__init__()
        self.features = nn.Sequential(
            # Block 1: 3 -> 64 channels   (224 -> 112 | 32 -> 16)
            nn.Conv2d(3, 64, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 2: 64 -> 128 channels  (112 -> 56 | 16 -> 8)
            nn.Conv2d(64, 128, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 3: 128 -> 256 channels (56 -> 28 | 8 -> 4)
            nn.Conv2d(128, 256, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 4: 256 -> 512 channels (28 -> 14 | 4 -> 2)
            nn.Conv2d(256, 512, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # Block 5: 512 -> 512 channels (14 -> 7 | 2 -> 1)
            nn.Conv2d(512, 512, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding='same', stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Identity for 224x224 input (feature map is already 7x7); resizes
        # other input sizes so the fixed-size classifier below always works.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))

        self.classifier = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run a forward pass.

        Args:
            x: input batch of shape (N, 3, H, W); H and W must each survive
               five 2x2 max-pools (i.e. be at least 32).

        Returns:
            Logits of shape (N, num_classes).
        """
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)  # (N, 512, 7, 7) -> (N, 25088)
        x = self.classifier(x)
        return x
