import torch
from torch import nn
from torchvision import transforms
from torchsummary import summary
"""
卷积：卷积核=3x3，步幅=1，padding=1
池化：池化核=2x2，步幅=2
全连接层之间：一个激活函数，3个全连接，2个relu
"""
class Inception(nn.Module):
    """GoogLeNet Inception block: four parallel paths whose outputs are
    concatenated along the channel dimension.

    Path 1: 1x1 conv
    Path 2: 1x1 conv (reduce) -> 3x3 conv (padding=1)
    Path 3: 1x1 conv (reduce) -> 5x5 conv (padding=2)
    Path 4: 3x3 max-pool (stride=1, padding=1) -> 1x1 conv

    Every path preserves the spatial size, so the outputs are
    concatenation-compatible; the output channel count is the sum of the
    four paths' output channels.
    """

    def __init__(self, in_channels: int, c1_out_channels: int,
                 c2_out_channels: tuple, c3_out_channels: tuple,
                 c4_out_channels: int):
        """
        :param in_channels: number of input channels
        :param c1_out_channels: output channels of path 1
        :param c2_out_channels: (1x1 reduce channels, 3x3 output channels) for path 2
        :param c3_out_channels: (1x1 reduce channels, 5x5 output channels) for path 3
        :param c4_out_channels: output channels of path 4's 1x1 conv
        """
        super(Inception, self).__init__()
        # Path 1: 1x1 conv
        self.c1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=c1_out_channels, kernel_size=1),
            nn.ReLU()
        )
        # Path 2: 1x1 conv, then 3x3 conv
        self.c2 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=c2_out_channels[0], kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=c2_out_channels[0], out_channels=c2_out_channels[1], kernel_size=3, padding=1),
            nn.ReLU(),
        )
        # Path 3: 1x1 conv, then 5x5 conv
        self.c3 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=c3_out_channels[0], kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=c3_out_channels[0], out_channels=c3_out_channels[1], kernel_size=5, padding=2),
            nn.ReLU(),
        )
        # Path 4: max-pool, then 1x1 conv
        self.c4 = nn.Sequential(
            # NOTE: MaxPool2d's stride defaults to kernel_size, so stride=1
            # must be given explicitly to keep the spatial size.
            nn.MaxPool2d(kernel_size=3, padding=1, stride=1),
            # Conv2d defaults: stride=1, padding=0
            nn.Conv2d(in_channels=in_channels, out_channels=c4_out_channels, kernel_size=1),
            nn.ReLU(),
        )


    def forward(self, input):
        c1 = self.c1(input)
        c2 = self.c2(input)
        c3 = self.c3(input)
        c4 = self.c4(input)
        # Concatenate (not add) the four paths along the channel dimension.
        return torch.cat((c1, c2, c3, c4), dim=1)


class GoogLetNetModel(nn.Module):
    """GoogLeNet (Inception v1) style classifier.

    Expects 1-channel 224x224 inputs and produces 10 class logits.
    """

    IMG_SIZE = 224
    IMG_CHANNELS = 1
    PTH_PATH = './model_files/best-model.pth'

    def __init_params(self):
        """Initialize the weights/biases of every conv and linear layer."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Kaiming init is suited to ReLU activations.
                nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # Small-variance normal init for fully-connected weights.
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def __init__(self):
        super(GoogLetNetModel, self).__init__()

        # Stem block 1: 7x7 conv (stride 2) + 3x3 max-pool (stride 2).
        self.b1 = nn.Sequential(
            nn.Conv2d(in_channels=self.IMG_CHANNELS, out_channels=64, kernel_size=7, stride=2, padding=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        # Stem block 2: 1x1 conv + 3x3 conv + max-pool. Output: 28x28x192.
        self.b2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )

        # 2 Inception blocks + 1 max-pool.
        self.b3 = nn.Sequential(
            Inception(in_channels=192, c1_out_channels=64, c2_out_channels=(96, 128), c3_out_channels=(16, 32), c4_out_channels=32),
            # out channels = 64+128+32+32 = 256
            Inception(in_channels=256, c1_out_channels=128, c2_out_channels=(128, 192), c3_out_channels=(32, 96), c4_out_channels=64),
            # out channels = 128+192+96+64 = 480
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )

        # 5 Inception blocks + 1 max-pool.
        self.b4 = nn.Sequential(
            Inception(
                in_channels=480, c1_out_channels=192, c2_out_channels=(96, 208), c3_out_channels=(16, 48), c4_out_channels=64
            ),
            # out channels = 192+208+48+64 = 512
            Inception(
                in_channels=512, c1_out_channels=160, c2_out_channels=(112, 224), c3_out_channels=(24, 64), c4_out_channels=64
            ),
            # out channels = 160+224+64+64 = 512
            Inception(
                in_channels=512, c1_out_channels=128, c2_out_channels=(128, 256), c3_out_channels=(24, 64), c4_out_channels=64
            ),
            # out channels = 128+256+64+64 = 512
            Inception(
                in_channels=512, c1_out_channels=112, c2_out_channels=(144, 288), c3_out_channels=(32, 64), c4_out_channels=64
            ),
            # out channels = 112+288+64+64 = 528
            Inception(
                in_channels=528, c1_out_channels=256, c2_out_channels=(160, 320), c3_out_channels=(32, 128), c4_out_channels=128
            ),
            # out channels = 832
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )

        # 3 final stages: 2 Inception blocks + global average pool + classifier.
        self.b5 = nn.Sequential(
            # in channels = 832
            Inception(
                in_channels=832, c1_out_channels=256, c2_out_channels=(160, 320), c3_out_channels=(32, 128), c4_out_channels=128
            ),
            # out channels = 256+320+128+128 = 832
            Inception(
                in_channels=832, c1_out_channels=384, c2_out_channels=(192, 384), c3_out_channels=(48, 128), c4_out_channels=128
            ),
            # out channels = 384+384+128+128 = 1024
            # Global average pooling: 7x7x1024 -> 1x1x1024
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            nn.Flatten(),
            # 1024 features -> 10 class logits
            nn.Linear(in_features=1024, out_features=10)
        )

        # Forward pipeline.
        self.output = nn.Sequential(
            self.b1,
            self.b2,
            self.b3,
            self.b4,
            self.b5
        )

        # BUG FIX: parameter initialization must run AFTER the submodules are
        # created. The original called __init_params() at the top of __init__,
        # when self.modules() contained no layers yet, making it a silent no-op.
        self.__init_params()


    def forward(self, input):
        return self.output(input)

if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GoogLetNetModel().to(device)
    # torchsummary's summary() prints the table itself and returns None, so
    # wrapping it in print() appended a stray "None" line. Also pass the
    # actual device: summary() defaults to "cuda" for its dummy input and
    # would crash on CPU-only machines otherwise.
    summary(
        model,
        (GoogLetNetModel.IMG_CHANNELS, GoogLetNetModel.IMG_SIZE, GoogLetNetModel.IMG_SIZE),
        device=device.type,
    )