import torch
import torch.nn as nn
import torch.nn.functional as F



from torchsummary import summary

# Define the Inception module, one of the core building blocks of GoogleNet

class Inception(nn.Module):
    """GoogLeNet Inception block: four parallel branches whose outputs are
    concatenated along the channel axis.

    Every branch preserves the spatial size (padding matches kernel size),
    so the output shape is (b, ch1x1 + ch3x3 + ch5x5 + pool_proj, h, w).
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()

        # Branch 1: a single 1x1 convolution.
        self.branch1 = nn.Conv2d(in_channels, ch1x1, kernel_size=1)

        # Branch 2: 1x1 channel reduction, then a padded 3x3 convolution.
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channels, ch3x3red, kernel_size=1),
            nn.Conv2d(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: 1x1 channel reduction, then a padded 5x5 convolution.
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_channels, ch5x5red, kernel_size=1),
            nn.Conv2d(ch5x5red, ch5x5, kernel_size=5, padding=2),
        )

        # Branch 4: 3x3 max pooling (stride 1, padded -> size unchanged),
        # then a 1x1 projection.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_channels, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        """Apply all four branches to x and concatenate on dim 1 (channels)."""
        return torch.cat(
            [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)],
            dim=1,
        )

# Define the main GoogleNet architecture
class GoogleNet224(nn.Module):
    """GoogLeNet-style classifier, truncated after the inception4d stage.

    Despite the name, the active forward path is sized for 32x32 inputs
    (see the __main__ summary call): the feature map is 2x2 after
    inception4d, reduced to 1x1 by ``avgpool4_2``, and fed to a 528-unit
    linear layer. Stages 4e/5a/5b and their pooling layers are still
    constructed (so their randomly initialized parameters appear in the
    state_dict) but are commented out of forward().
    """

    def __init__(self, num_classes=10):
        # num_classes: width of the final classification layer.
        super(GoogleNet224, self).__init__()
        
        # Stem: strided 7x7 conv + max pool, each halving the spatial size.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)  # 3->64 ch; (224-7+6)/2+1=112, or 32->16
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)   # halves size: 112->56, or 16->8
        
        # Second conv stage (spatial size unchanged until the pool).
        self.conv2 = nn.Conv2d(64, 64, kernel_size=1)                      # 1x1 conv, size unchanged
        self.conv3 = nn.Conv2d(64, 192, kernel_size=3, padding=1)          # padded 3x3 conv, size unchanged
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)   # halves size: 56->28, or 8->4

        # Inception stage 3: 64+128+32+32=256 ch, then 128+192+96+64=480 ch.
        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)   # halves size: 28->14, or 4->2

        # Inception stage 4 (4e is constructed but unused in forward()).
        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)   # -> 512 ch
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)  # -> 512 ch
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)  # -> 512 ch
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)    # -> 528 ch (528x2x2 for a 32x32 input)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)  # -> 832 ch; unused in forward()
        self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)   # unused in forward()
        self.avgpool4_2 = nn.AvgPool2d(kernel_size=2,stride=2, padding=0)  # reduces the 2x2 map (32x32 input) to 1x1
        # Inception stage 5 (constructed but unused in forward()).
        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)  # -> 832 ch
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)  # -> 1024 ch

        # Classifier head.

        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)  # for the original 224-input path; unused in forward()
        # self.avgpool_test1 = nn.AvgPool2d(kernel_size=1, stride=1)  # experiment: identity-size pool
        # self.avgpool_test2 = nn.AvgPool2d(kernel_size=2, stride=1)  # experiment: 2-2+1=1

        
        self.dropout = nn.Dropout(p=0.4)                      # regularization before the classifier
        self.fc = nn.Linear(528, num_classes)                # 528 = inception4d channels after 1x1 pooling
        # self.fc = nn.Linear(528,128)                # experiment: narrower head
        # self.fc2 = nn.Linear(128, num_classes)                # experiment: second FC layer

    # Forward pass: stem -> stage 3 -> stage 4a..4d -> pool -> FC -> softmax.
    def forward(self, x):
        # x: (batch, 3, H, W); the active path assumes H = W = 32 -- TODO confirm.
        x = F.relu(self.conv1(x))           # stem conv + ReLU
        x = self.maxpool1(x)                # first downsampling pool

        x = F.relu(self.conv2(x))           # 1x1 conv + ReLU
        x = F.relu(self.conv3(x))           # 3x3 conv + ReLU
        x = self.maxpool2(x)                # second downsampling pool

        x = self.inception3a(x)             # Inception 3a
        x = self.inception3b(x)             # Inception 3b
        x = self.maxpool3(x)                # third downsampling pool

        x = self.inception4a(x)             # Inception 4a
        x = self.inception4b(x)             # Inception 4b
        x = self.inception4c(x)             # Inception 4c
        x = self.inception4d(x)             # Inception 4d -> (batch, 528, 2, 2) for a 32x32 input
        # Experiment note (translated): tapping the output here for a 3x32x32
        # input gives a flattened width of 2112 = 528*4, i.e. (b, 528, 2, 2);
        # the 2x2 average pool below reduces it to (b, 528, 1, 1),
        x = self.avgpool4_2(x)             
        # hence fc must accept 528 inputs.

        # x = self.inception4e(x)             # Inception 4e (disabled)
        # x = self.maxpool4(x)                # fourth pool (disabled)

        # x = self.inception5a(x)             # Inception 5a (disabled)
        # x = self.inception5b(x)             # Inception 5b (disabled)

        # x = self.avgpool(x)                 # original full-depth global pool (57% acc per experiments)
        # x = self.avgpool_test1(x)           # experiment: 53% (information lost)
        # x = self.avgpool_test2(x)              # experiment: 51% (overfit)
        # Option 4: reduce the FC dimensionality further.


        # x = torch.flatten(x, 1)             # equivalent flatten: [b, C, 1, 1] -> [b, C]
        x = x.view(x.size(0), -1)           # flatten to (batch, 528)
        x = self.dropout(x)                 # dropout
        x = self.fc(x)
        # x = self.fc2(x)                     # second FC layer (disabled)
        # NOTE(review): applying softmax here means training with
        # nn.CrossEntropyLoss would apply softmax twice -- confirm the loss
        # function used by the trainer.
        x = F.softmax(x, dim=1)            # class probabilities
        return x

def get_GoogleNet224():
    """Factory: return a freshly constructed GoogleNet224 instance."""
    return GoogleNet224()


if __name__=='__main__':
    # Smoke test: build the network, print its structure, and show a
    # per-layer summary for a 32x32 RGB input (requires a CUDA device).
    net = GoogleNet224()
    print(net)
    net = net.to('cuda')
    summary(net, (3, 32, 32))
    # summary(net, (3, 224, 224))