import torch
from torch import nn
class LeNet(nn.Module):
    """LeNet-5 style CNN for 1x28x28 inputs (e.g. MNIST), producing 10 class logits.

    Feature extractor (shapes are channels x height x width):
        1x28x28  --conv 5x5, 6 filters-->   6x24x24
                 --max-pool 2x2, stride 2-->  6x12x12
                 --conv 5x5, 16 filters-->  16x8x8
                 --max-pool 2x2, stride 2-->  16x4x4
    Classifier head: 16*4*4 -> 120 -> 84 -> 10.

    NOTE: the original LeNet-5 replaces the first Linear layer with a
    120-filter conv whose kernel matches the final feature-map size; a
    Linear layer on the flattened features is mathematically equivalent here.
    """

    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            # 1x28x28 -> 6x24x24: single input channel, 6 filters of size 5x5
            nn.Conv2d(1, 6, 5),
            nn.ReLU(),
            # 6x24x24 -> 6x12x12: 2x2 max-pool, stride 2
            nn.MaxPool2d(2, 2),
            # 6x12x12 -> 16x8x8: 16 filters of size 5x5
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            # 16x8x8 -> 16x4x4: 2x2 max-pool, stride 2
            nn.MaxPool2d(2, 2),
        )
        self.fc = nn.Sequential(
            nn.Linear(16 * 4 * 4, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )

    def forward(self, img):
        """Return class logits of shape (batch, 10) for img of shape (batch, 1, 28, 28)."""
        feature = self.conv(img)
        # Flatten each sample's 16x4x4 feature maps before the fully connected head.
        return self.fc(feature.view(img.shape[0], -1))