from torch import nn
from typing import Union, Iterable, Tuple, List


class VGG(nn.Module):
    """VGG-style network: configurable conv blocks followed by a 3-layer classifier head.

    Call :meth:`set_vgg_blocks` after construction to build the convolutional
    stage; :meth:`forward` iterates those blocks before the linear layers.

    :param out_size: height/width of the (square) feature map produced by the
        last conv block — used to size the first linear layer.
    :param out_channels: number of channels produced by the last conv block.
    :param num_classes: size of the final classification layer (default 10).
    """

    def __init__(self, out_size, out_channels, num_classes=10):
        super(VGG, self).__init__()
        # nn.ModuleList (not a plain list) so conv parameters are registered
        # with the module and visible to optimizers / .to(device) / state_dict.
        # Also uses a single consistent name: the original initialized
        # `_vgg_blocks` but forward read the mangled `__vgg_blocks`.
        self._vgg_blocks = nn.ModuleList()
        self._dropout = nn.Dropout(p=0.5)
        self._flatten = nn.Flatten()
        self._active = nn.ReLU()
        self._linear1 = nn.Linear(out_channels * out_size * out_size, 4096)
        self._linear2 = nn.Linear(4096, 4096)
        self._linear3 = nn.Linear(4096, num_classes)

    def forward(self, x):
        """Run the conv blocks, then flatten and apply the classifier head."""
        for layer in self._vgg_blocks:
            x = layer(x)
        x = self._flatten(x)
        x = self._linear1(x)
        x = self._active(x)
        x = self._dropout(x)
        x = self._linear2(x)
        x = self._active(x)
        x = self._dropout(x)
        x = self._linear3(x)
        return x

    @staticmethod
    def __get_vgg_block(in_channel: int, out_channel: int, num_convs: int = 3):
        """Build one VGG block: ``num_convs`` 3x3 conv+ReLU pairs, then a 2x2 max-pool.

        The pool uses stride 2 and no padding (standard VGG), halving the
        spatial size.  The original ``padding=2`` exceeded half the kernel
        size, which PyTorch rejects with a RuntimeError at forward time.
        """
        layers = []
        for _ in range(num_convs):
            layers.append(nn.Conv2d(in_channel, out_channel, kernel_size=(3, 3), padding=1))
            layers.append(nn.ReLU())
            in_channel = out_channel  # subsequent convs keep the block's channel count
        return nn.Sequential(*layers, nn.MaxPool2d(kernel_size=(2, 2), stride=2))

    def set_vgg_blocks(self, in_channels: int, conv_arch: Iterable[Union[List[int], Tuple[int, int]]]):
        """Build the convolutional stage of the network.

        :param in_channels: channel count of the network input.
        :param conv_arch: iterable of ``(num_convs, out_channels)`` pairs,
            one pair per VGG block.
        """
        self._vgg_blocks = nn.ModuleList()
        for conv_nums, out_channel in conv_arch:
            self._vgg_blocks.append(self.__get_vgg_block(in_channels, out_channel, conv_nums))
            in_channels = out_channel  # next block consumes this block's output
