import paddle
import paddle.nn.functional as F
from paddle.metric import Accuracy
from minist import load_minist

# Build LeNet from the paddle.nn layer APIs such as Conv2D, MaxPool2D and Linear.
# Conv2D, MaxPool2D and Linear are common layer types: convolution, max pooling
# and fully connected layers — core building blocks of CNNs.
# Typical pattern: several Conv2D + MaxPool2D stages extract features, the result
# is flattened to a 1-D vector, and one or more Linear layers perform classification.
class LeNet(paddle.nn.Layer):
    """Classic LeNet-style CNN producing 10-class logits.

    Two conv + max-pool stages extract features, which are flattened and
    passed through three fully connected layers. The 16 * 5 * 5 input size
    of the first Linear layer implies 28x28 single-channel images
    (e.g. MNIST): 28 -> conv1(pad=2) 28 -> pool 14 -> conv2 10 -> pool 5.
    """

    def __init__(self):
        super().__init__()
        # Feature stage 1: 1 -> 6 channels; padding=2 preserves spatial size.
        self.conv1 = paddle.nn.Conv2D(
            in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=2
        )
        self.max_pool1 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)
        # Feature stage 2: 6 -> 16 channels, no padding.
        self.conv2 = paddle.nn.Conv2D(
            in_channels=6, out_channels=16, kernel_size=5, stride=1
        )
        self.max_pool2 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)
        # Classifier head: 16*5*5 flattened features -> 120 -> 84 -> 10 logits.
        self.linear1 = paddle.nn.Linear(
            in_features=16 * 5 * 5, out_features=120
        )
        self.linear2 = paddle.nn.Linear(in_features=120, out_features=84)
        self.linear3 = paddle.nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        """Return raw class logits (no softmax) for a batch of images."""
        out = self.max_pool1(F.relu(self.conv1(x)))
        out = self.max_pool2(F.relu(self.conv2(out)))
        # Collapse (C, H, W) into one feature axis; batch axis is kept.
        out = paddle.flatten(out, start_axis=1, stop_axis=-1)
        out = F.relu(self.linear1(out))
        out = F.relu(self.linear2(out))
        return self.linear3(out)


# Wrap the network with paddle.Model and use its built-in train/eval loops
# to train and test quickly.
def model_train(train_dataset, epochs=2, batch_size=64, learning_rate=0.001):
    """Train a LeNet classifier on ``train_dataset`` and return the model.

    The hyperparameters were previously hard-coded; they are now keyword
    arguments with the same defaults, so existing callers are unaffected.

    Args:
        train_dataset: A paddle-compatible dataset of (image, label) pairs.
        epochs (int): Number of passes over the training data.
        batch_size (int): Mini-batch size used by ``Model.fit``.
        learning_rate (float): Adam learning rate.

    Returns:
        paddle.Model: The trained, high-level-API-wrapped model.
    """
    model = paddle.Model(LeNet())  # wrap the network in the high-level Model API
    optim = paddle.optimizer.Adam(
        learning_rate=learning_rate, parameters=model.parameters()
    )

    # Configure the model: optimizer, loss, and the metric tracked during fit.
    # NOTE: CrossEntropyLoss expects raw logits, which LeNet.forward produces.
    model.prepare(optim, paddle.nn.CrossEntropyLoss(), Accuracy())
    # Run the built-in training loop.
    model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=1)
    return model

# Use model.evaluate to measure loss/accuracy on the test set.
def evaluate_test(test_dataset, model):
    """Evaluate ``model`` on ``test_dataset`` and return the metrics.

    Previously the result of ``model.evaluate`` (a dict of loss/metric
    values) was computed and discarded; returning it lets callers inspect
    the numbers while remaining backward-compatible (old callers simply
    ignore the return value).

    Args:
        test_dataset: A paddle-compatible dataset of (image, label) pairs.
        model (paddle.Model): A prepared/trained high-level model.

    Returns:
        dict: Loss and metric results from ``Model.evaluate``.
    """
    return model.evaluate(test_dataset, batch_size=64, verbose=1)

if __name__ == '__main__':
    # Load the MNIST-style train/test splits from the local helper module
    # (module name "minist" is the project's own spelling).
    train_set, test_set = load_minist()

    # Train, then checkpoint parameters + optimizer state (training mode,
    # the default of Model.save) so training can be resumed later.
    trained_model = model_train(train_set)
    trained_model.save('checkpoint/test')  # save for training

    # Finally report test-set performance.
    evaluate_test(test_set, trained_model)