"""google net in pytorch
    https://arxiv.org/abs/1409.4842v1
"""

import torch
import torch.nn as nn


class BaseConv2d(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    The convolution is created with ``bias=False``: the following
    BatchNorm layer carries its own learnable shift, which makes a
    convolution bias redundant.

    Args:
        in_channel: number of input feature channels.
        out_channel: number of output feature channels.
        **kwargs: forwarded verbatim to ``nn.Conv2d`` (kernel_size, padding, ...).
    """

    def __init__(self, in_channel, out_channel, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channel, out_channel, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply convolution, batch normalization and ReLU in sequence."""
        return self.relu(self.bn(self.conv(x)))


class Inception(nn.Module):
    """GoogLeNet Inception module: four parallel branches, concatenated.

    Args:
        input_channels: number of channels produced by the previous layer.
        ch1x1: output channels of the plain 1x1 branch.
        ch3x3_reduce: 1x1 reduction channels placed before the 3x3 conv.
        ch3x3: output channels of the 3x3 branch.
        ch5x5_reduce: 1x1 reduction channels placed before the "5x5" convs.
        ch5x5: output channels of the "5x5" branch.
        pool_proj: 1x1 projection channels applied after the pooling branch.
    """

    def __init__(self, input_channels, ch1x1, ch3x3_reduce, ch3x3, ch5x5_reduce, ch5x5, pool_proj):
        super().__init__()

        # Branch 1: a single 1x1 convolution.
        self.branch_1 = nn.Sequential(BaseConv2d(input_channels, ch1x1, kernel_size=1))

        # Branch 2: 1x1 reduction followed by a padded 3x3 convolution.
        self.branch_2 = nn.Sequential(
            BaseConv2d(input_channels, ch3x3_reduce, kernel_size=1),
            BaseConv2d(ch3x3_reduce, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: the "5x5" path, realised as two stacked 3x3 convolutions —
        # same receptive field as one 5x5 filter, but with fewer parameters.
        self.branch_3 = nn.Sequential(
            BaseConv2d(input_channels, ch5x5_reduce, kernel_size=1),
            BaseConv2d(ch5x5_reduce, ch5x5, kernel_size=3, padding=1),
            BaseConv2d(ch5x5, ch5x5, kernel_size=3, padding=1),
        )

        # Branch 4: "same" 3x3 max-pooling followed by a 1x1 projection.
        self.branch_4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            BaseConv2d(input_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        """Run every branch on ``x`` and concatenate the results.

        The concatenation is along dim=1, the channel axis of the usual
        (batch, channels, height, width) layout, so the spatial size is
        unchanged and the channel counts of the branches add up.
        """
        branches = (self.branch_1, self.branch_2, self.branch_3, self.branch_4)
        return torch.cat([branch(x) for branch in branches], dim=1)


class GoogleNet(nn.Module):
    """GoogLeNet-style classifier built from Inception modules.

    NOTE(review): the stem uses 3x3/stride-1 convolutions rather than the
    paper's 7x7/stride-2 stem, so this variant appears tailored to small
    input images (e.g. CIFAR) — confirm intended input size with callers.

    Args:
        num_class: number of output classes for the final linear layer.
    """

    def __init__(self, num_class=1000):
        super().__init__()

        # Stem: three 3x3 convolutions, then a stride-2 max-pool.
        # The BatchNorm layers are intentionally left commented out.
        self.part_1_layer = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
            # nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False),
            # nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 192, kernel_size=3, padding=1, bias=False),
            # nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2, padding=1),
        )

        # Inception stage (3a, 3b) followed by downsampling.
        self.part_2_layer = nn.Sequential(
            Inception(192, 64, 96, 128, 16, 32, 32),
            Inception(256, 128, 128, 192, 32, 96, 64),
            nn.MaxPool2d(3, stride=2, padding=1),
        )

        # Inception stage (4a-4e) followed by downsampling.
        self.part_3_layer = nn.Sequential(
            Inception(480, 192, 96, 208, 16, 48, 64),
            Inception(512, 160, 112, 224, 24, 64, 64),
            Inception(512, 128, 128, 256, 24, 64, 64),
            Inception(512, 112, 144, 288, 32, 64, 64),
            Inception(528, 256, 160, 320, 32, 128, 128),
            nn.MaxPool2d(3, stride=2, padding=1),
        )

        # Inception stage (5a, 5b) followed by global average pooling,
        # collapsing each 1024-channel feature map to a single value.
        self.part_4_layer = nn.Sequential(
            Inception(832, 256, 160, 320, 32, 128, 128),
            Inception(832, 384, 192, 384, 48, 128, 128),
            nn.AdaptiveAvgPool2d((1, 1)),
        )

        # Classifier head: flatten, dropout, linear projection to classes.
        # Per the paper, replacing the fully connected layers with average
        # pooling improved accuracy, yet dropout remained essential.
        self.part_5_layer = nn.Sequential(
            nn.Flatten(),
            nn.Dropout(p=0.4),
            nn.Linear(1024, num_class),
        )

    def forward(self, x):
        """Push ``x`` through the five stages in order and return logits."""
        stages = (
            self.part_1_layer,
            self.part_2_layer,
            self.part_3_layer,
            self.part_4_layer,
            self.part_5_layer,
        )
        for stage in stages:
            x = stage(x)
        return x
