import paddle
import paddle.nn.functional as F

class LeNetModel(paddle.nn.Layer):
    """LeNet-5 style convolutional network for 1-channel 28x28 inputs (e.g. MNIST).

    Two conv -> relu -> max-pool stages extract features; three fully
    connected layers map them to 10 raw class logits (no softmax applied).
    """

    def __init__(self):
        super().__init__()
        # Stage 1: 1 -> 6 channels, 5x5 kernel, stride 1, then 2x2/stride-2 pool.
        self.conv1 = paddle.nn.Conv2D(in_channels=1, out_channels=6, kernel_size=5, stride=1)
        self.pool1 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)
        # Stage 2: 6 -> 16 channels, 5x5 kernel, stride 1, then 2x2/stride-2 pool.
        self.conv2 = paddle.nn.Conv2D(in_channels=6, out_channels=16, kernel_size=5, stride=1)
        self.pool2 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)
        # Classifier head: flattened 16*4*4 = 256 features -> 120 -> 84 -> 10 logits.
        # (For a 28x28 input: 28 -conv5-> 24 -pool-> 12 -conv5-> 8 -pool-> 4.)
        self.fc1 = paddle.nn.Linear(in_features=16 * 4 * 4, out_features=120)
        self.fc2 = paddle.nn.Linear(in_features=120, out_features=84)
        self.fc3 = paddle.nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        """Forward pass: feature extraction, flatten, then the FC classifier."""
        # Feature extractor: conv -> relu -> pool, applied twice.
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        # Collapse every axis after the batch axis into one feature vector.
        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
        # Two relu-activated hidden layers, then the linear output layer.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
