import torch
import torch.nn as nn

"""
创建LeNet模型
卷积 >> 池化 >> 卷积 >> 池化 >> 卷积+展平 >> 全连接 >> 全连接（输出）
卷积 >> 池化 >> 卷积 >> 池化 >> 展平+全连接 >> 全连接 >> 全连接（输出）
整体就两大部分
1. 卷积池化激活  特征提取
2. 全连接       分类器
"""


class LeNet(nn.Module):
    """LeNet-5 for single-channel 32x32 inputs (e.g. zero-padded MNIST).

    Pipeline: conv -> tanh -> avg-pool -> tanh (x2), then a final conv,
    flatten, and two fully-connected layers ending in log-softmax.
    ``forward`` returns log-probabilities over 10 classes, so pair the
    output with ``nn.NLLLoss`` (or take ``exp()`` for probabilities).
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: three 5x5 convolutions, 2x2 average pooling.
        self.conv1 = nn.Conv2d(1, 6, 5)     # (N, 1, 32, 32) -> (N, 6, 28, 28)
        self.conv2 = nn.Conv2d(6, 16, 5)    # (N, 6, 14, 14) -> (N, 16, 10, 10)
        self.conv3 = nn.Conv2d(16, 120, 5)  # (N, 16, 5, 5)  -> (N, 120, 1, 1)
        self.avg_pool = nn.AvgPool2d(2)     # halves spatial dims; reused twice
        self.tanh = nn.Tanh()               # classic LeNet activation
        self.flat = nn.Flatten()
        self.softmax = nn.LogSoftmax(dim=-1)
        # Classifier head.
        self.fc1 = nn.Linear(120, 84)
        self.fc2 = nn.Linear(84, 10)

    def forward(self, x):
        """Map a (N, 1, 32, 32) batch to (N, 10) class log-probabilities."""
        x = self.tanh(self.avg_pool(self.tanh(self.conv1(x))))  # -> (N, 6, 14, 14)
        x = self.tanh(self.avg_pool(self.tanh(self.conv2(x))))  # -> (N, 16, 5, 5)
        x = self.tanh(self.conv3(x))                            # -> (N, 120, 1, 1)
        x = self.flat(x)                                        # -> (N, 120)
        x = self.tanh(self.fc1(x))                              # -> (N, 84)
        return self.softmax(self.fc2(x))                        # -> (N, 10) log-probs


if __name__ == '__main__':
    # Smoke test: one forward pass on a batch of 8 random 1x32x32 images.
    model = LeNet()
    x = torch.rand(8, 1, 32, 32)
    y = model(x)
    print(y.shape)  # expected: torch.Size([8, 10])

    # Per-layer summary via the optional third-party `torchsummary` package;
    # skip gracefully instead of crashing when it is not installed.
    try:
        from torchsummary import summary
    except ImportError:
        print('torchsummary is not installed; skipping model summary')
    else:
        summary(model, (1, 32, 32), batch_size=8, device='cpu')
