import torch.nn as nn
import torch

# input:224*224 RGB
"""
0 conv3-64  *2
maxpool---------------
1 conv3-128 *2 
maxpool---------------
2 conv3-256 *2 conv1-256
maxpool---------------
3 conv3-512 *2 conv1-512
maxpool---------------
4 conv3-512 *2 conv1-512
maxpool---------------
5 FC-4096   *2 FC-1000
soft-max
"""
# Build the network


def VGG_block(in_ch, out_ch, use_k1x1=False):
    """Build one VGG stage as a flat list of layers.

    The stage is two 3x3 Conv-BN-ReLU units, an optional extra 1x1
    Conv-BN-ReLU unit, and a closing 2x2 max-pool.

    Args:
        in_ch: channels entering the first convolution.
        out_ch: channels produced by every convolution in the stage.
        use_k1x1: when True, append the extra 1x1 convolution unit.

    Returns:
        list[nn.Module]: the layers in forward order (not yet wrapped
        in a container).
    """

    def cbr(cin, cout, k):
        # One Conv -> BatchNorm -> ReLU unit.
        # NOTE(review): padding=1 is applied even to the 1x1 kernel, which
        # grows H/W by 2 instead of keeping them; the downstream head relies
        # on these exact sizes, so the behavior is preserved as-is.
        return [
            nn.Conv2d(in_channels=cin,
                      out_channels=cout,
                      kernel_size=k,
                      padding=1),
            nn.BatchNorm2d(cout),
            nn.ReLU(),
        ]

    layers = cbr(in_ch, out_ch, 3) + cbr(out_ch, out_ch, 3)
    if use_k1x1:
        layers += cbr(out_ch, out_ch, 1)
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return layers


# The channel-doubling loop starts from the second VGG block, i.e. conv(64, 128, kernel_size=3)
class VGG16(nn.Module):
    """VGG-style classifier assembled from VGG_block stages.

    block_0 maps RGB input to ``input_ch`` channels; each of the next
    ``block_count - 1`` stages doubles the channel count (stages after the
    first also get the extra 1x1 unit); block_4 keeps the channel count;
    block_5 replaces the original fully-connected head with convolutions,
    so the output stays a 4-D tensor (N, classes, H, W).

    Args:
        input_ch: channel count produced by the first stage (default 64).
        block_count: number of stages including the stem (default 4).
        mode: "train" returns raw logits; "test" applies softmax over dim 1.
    """

    def __init__(self, input_ch=64, block_count=4, mode="train"):
        super().__init__()
        self.mode = mode
        self.block_i = []  # flat list of every layer, in forward order

        # Stage 0 (stem): 3 -> input_ch channels.
        self.block_0 = VGG_block(in_ch=3, out_ch=input_ch, use_k1x1=False)
        self.block_i.extend(self.block_0)

        # FIX: initialize out_ch before the loop so block_count == 1 no
        # longer raises AttributeError (the loop below may not execute).
        self.out_ch = input_ch

        # Stages 1 .. block_count-1: each doubles the channel count.
        # Stored via setattr to keep the block_1/block_2/... attribute API.
        for count in range(1, block_count):
            stage = VGG_block(input_ch, input_ch * 2,
                              use_k1x1=(count != 1))
            setattr(self, "block_{}".format(count), stage)
            self.block_i.extend(stage)
            input_ch *= 2
            self.out_ch = input_ch

        # Final conv stage (named block_4 for compatibility): channels kept.
        self.block_4 = VGG_block(in_ch=self.out_ch,
                                 out_ch=self.out_ch,
                                 use_k1x1=True)
        self.block_i.extend(self.block_4)

        # Convolutional head replacing the original FC layers.
        self.block_5 = [
            # CBR module
            nn.Conv2d(
                in_channels=self.out_ch,
                out_channels=self.out_ch // 2,
                kernel_size=3,
            ),
            nn.BatchNorm2d(self.out_ch // 2),
            nn.ReLU(),
            nn.Conv2d(
                in_channels=self.out_ch // 2,
                out_channels=3,  # classes
                kernel_size=6),  # collapses the 6x6 feature map to 1x1
        ]
        self.block_i.extend(self.block_5)
        self.result = nn.Sequential(*self.block_i)

    def forward(self, x):
        """Run the network; softmax over the class dim only in "test" mode."""
        out = self.result(x)
        if self.mode == "test":
            return out.softmax(dim=1)
        return out


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = VGG16(mode='test').to(device=device)

    # Best-effort model summary. torchsummary is an optional third-party
    # package, and its `summary` defaults to device="cuda", which crashes
    # on CPU-only machines -- pass the actual device type explicitly.
    try:
        from torchsummary import summary
        summary(net, (3, 224, 224), device=device.type)
    except ImportError:
        print("torchsummary not installed; skipping model summary")

    # Smoke test: a batch of 2 RGB 224x224 images.
    x = torch.randn(2, 3, 224, 224, device=device)
    out = net(x)
    print(out.shape)  # torch.Size([2, 3, 1, 1])
    print(out)

