{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "import gzip\n",
    "import numpy as np\n",
    "import random\n",
    "import time\n",
    "import paddle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset file location\n",
    "datafile = './datasets/mnist.json.gz'\n",
    "print('loading mnist dataset from {} ......'.format(datafile))\n",
    "# Load the gzipped JSON data file\n",
    "data = json.load(gzip.open(datafile))\n",
    "print('mnist dataset load done')\n",
    "\n",
    "# The loaded data splits into train / validation / test sets\n",
    "print(len(data[0][0]),len(data[0][1]),len(data[1][0]),len(data[1][1]),len(data[2][0]),len(data[2][1]))\n",
    "train_set, val_set, eval_set = data\n",
    "\n",
    "# Inspect the training set size\n",
    "imgs, labels = train_set[0], train_set[1]\n",
    "print(\"训练数据集数量: \", len(imgs))\n",
    "\n",
    "# Inspect the validation set size\n",
    "imgs, labels = val_set[0], val_set[1]\n",
    "print(\"验证数据集数量: \", len(imgs))\n",
    "\n",
    "# Inspect the test set size\n",
    "# FIX: a stray 'val=' made this an accidental chained assignment\n",
    "# (imgs, labels = val = ...), silently creating a bogus 'val' variable.\n",
    "imgs, labels = eval_set[0], eval_set[1]\n",
    "print(\"测试数据集数量: \", len(imgs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shuffle the data and serve it in mini-batches.\n",
    "# 'yield' turns data_generator into a generator function: each iteration\n",
    "# runs up to the yield, hands back one batch, and pauses until the next one.\n",
    "imgs, labels = train_set[0], train_set[1]\n",
    "print(\"训练数据集数量: \", len(imgs))\n",
    "# Number of samples in the training set\n",
    "imgs_length = len(imgs)\n",
    "# Index list used to address samples; shuffled below for random order\n",
    "index_list = list(range(imgs_length))\n",
    "# Mini-batch size used when reading data\n",
    "BATCHSIZE = 100\n",
    "\n",
    "# Shuffle the sample indices in place\n",
    "random.shuffle(index_list)\n",
    "\n",
    "# Generator yielding (images, labels) batches of size BATCHSIZE\n",
    "def data_generator():\n",
    "    imgs_list = []\n",
    "    labels_list = []\n",
    "    for i in index_list:\n",
    "        # Convert each sample to float32 arrays\n",
    "        img = np.array(imgs[i]).astype('float32')\n",
    "        label = np.array(labels[i]).astype('float32')\n",
    "        imgs_list.append(img)\n",
    "        labels_list.append(label)\n",
    "        if len(imgs_list) == BATCHSIZE:\n",
    "            # A full batch is ready: yield it\n",
    "            yield np.array(imgs_list), np.array(labels_list)\n",
    "            # Reset the accumulators for the next batch\n",
    "            imgs_list = []\n",
    "            labels_list = []\n",
    "\n",
    "    # If fewer than BATCHSIZE samples remain, yield them as a final,\n",
    "    # smaller mini-batch instead of dropping them.\n",
    "    # FIX: removed a stray 'return data_generator' that sat inside the\n",
    "    # generator body — a leftover from a wrapper pattern; a return value in\n",
    "    # a generator only sets StopIteration.value and is ignored by callers.\n",
    "    if len(imgs_list) > 0:\n",
    "        yield np.array(imgs_list), np.array(labels_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the generator function as the training data loader\n",
    "train_loader = data_generator\n",
    "# Pull exactly one batch to sanity-check its shape, then stop\n",
    "for batch_id, data in enumerate(train_loader()):\n",
    "    image_data, label_data = data\n",
    "    # Report the array shapes of the first batch\n",
    "    print(\"打印第一个batch数据的维度:\")\n",
    "    print(\"图像维度: {}, 标签维度: {}\".format(image_data.shape, label_data.shape))\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: every image must have a matching label\n",
    "imgs_length = len(imgs)\n",
    "assert imgs_length == len(labels), \"length of train_imgs({}) should be the same as train_labels({})\".format(len(imgs), len(labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the generator function as the training data loader\n",
    "train_loader = data_generator\n",
    "# Pull exactly one batch and report both shapes and Python types\n",
    "for batch_id, data in enumerate(train_loader()):\n",
    "    image_data, label_data = data\n",
    "    print(\"打印第一个batch数据的维度，以及数据的类型:\")\n",
    "    print(\"图像维度: {}, 标签维度: {}, 图像数据类型: {}, 标签数据类型: {}\".format(image_data.shape, label_data.shape, type(image_data), type(label_data)))\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets.generate import MnistDataset\n",
    "\n",
    "# Build the training dataset; MnistDataset yields one sample per iteration\n",
    "train_dataset = MnistDataset(mode='train')\n",
    "# paddle.io.DataLoader wraps the dataset into an asynchronous batch iterator\n",
    "data_loader = paddle.io.DataLoader(train_dataset, batch_size=100, shuffle=True)\n",
    "# Read the first few batches and print their shapes\n",
    "for i, data in enumerate(data_loader()):\n",
    "    images, labels = data\n",
    "    print(i, images.shape, labels.shape)\n",
    "    if i >= 2:\n",
    "        break"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 基于前馈神经网络实现手写数字识别任务\n",
    "\n",
    "前馈神经网络作为最基础的神经网络，一般包含输入层、隐藏层和输出层。每层神经元只和相邻层神经元相连，即每层神经元只接收相邻前序神经层中神经元所传来的信息，只给相邻后续神经层中神经元传递信息。在前馈神经网络中，同一层的神经元之间没有任何连接，后续神经层不向其前序相邻神经层传递任何信息。前馈神经网络是目前应用最为广泛的神经网络之一。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single-hidden-layer network for MNIST digit classification\n",
    "class MNIST(paddle.nn.Layer):\n",
    "    def __init__(self):\n",
    "        super(MNIST, self).__init__()\n",
    "        # 784 pixel inputs -> 500 hidden units -> 10 class scores\n",
    "        self.fc1 = paddle.nn.Linear(in_features=784, out_features=500)\n",
    "        self.act = paddle.nn.Sigmoid()\n",
    "        # self.act = paddle.nn.ReLU()\n",
    "        self.fc2 = paddle.nn.Linear(in_features=500, out_features=10)\n",
    "        # NOTE(review): forward() emits Softmax probabilities; a loss that\n",
    "        # applies softmax internally (e.g. F.cross_entropy's default) would\n",
    "        # double-apply it — confirm the loss configuration.\n",
    "        self.softmax = paddle.nn.Softmax()\n",
    "\n",
    "    # Forward pass: returns per-class probabilities\n",
    "    def forward(self, x):\n",
    "        h = self.fc1(x)\n",
    "        h = self.act(h)\n",
    "        h = self.fc2(h)\n",
    "        return self.softmax(h)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import paddle.nn.functional as F\n",
    "\n",
    "class Trainer(object):\n",
    "    \"\"\"Bundles model, optimizer and the train/eval loops for MNIST.\"\"\"\n",
    "\n",
    "    def __init__(self, model_path, model, optimizer):\n",
    "        self.model_path = model_path   # where trained weights are saved\n",
    "        self.model = model             # the network to train\n",
    "        self.optimizer = optimizer     # optimizer bound to model.parameters()\n",
    "\n",
    "    def save(self):\n",
    "        # Persist the model weights to self.model_path\n",
    "        paddle.save(self.model.state_dict(), self.model_path)\n",
    "\n",
    "    def val_epoch(self, datasets):\n",
    "        \"\"\"Run one full evaluation pass; return mean accuracy as a float.\"\"\"\n",
    "        self.model.eval()  # switch to evaluation mode\n",
    "        acc = list()\n",
    "        for batch_id, data in enumerate(datasets()):\n",
    "            images, labels = data\n",
    "            pred = self.model(images)   # per-class probabilities\n",
    "            # Highest-scoring class index is the prediction\n",
    "            pred = paddle.argmax(pred, axis=-1)\n",
    "            # NOTE(review): assumes labels share pred's shape ([N]); if the\n",
    "            # loader yields [N, 1] labels, equal() would broadcast to [N, N]\n",
    "            # and corrupt the accuracy — confirm against MnistDataset.\n",
    "            res = paddle.equal(pred, labels)\n",
    "            res = paddle.cast(res, dtype='float32')\n",
    "            acc.extend(res.numpy())  # collect per-sample 0/1 correctness\n",
    "        acc = np.array(acc).mean()\n",
    "        return acc\n",
    "\n",
    "    def train_step(self, data):\n",
    "        \"\"\"One forward/backward/update step; returns the averaged loss.\"\"\"\n",
    "        images, labels = data\n",
    "        # Forward pass: the models in this notebook end with Softmax, so\n",
    "        # predicts holds probabilities, not logits.\n",
    "        predicts = self.model(images)\n",
    "        # FIX: F.cross_entropy applies softmax internally by default; with a\n",
    "        # Softmax output layer that double-applies softmax, flattening\n",
    "        # gradients. use_softmax=False treats the input as probabilities.\n",
    "        loss = F.cross_entropy(predicts, labels, use_softmax=False)\n",
    "        avg_loss = paddle.mean(loss)\n",
    "        # Backward pass and parameter update\n",
    "        avg_loss.backward()\n",
    "        self.optimizer.step()\n",
    "        self.optimizer.clear_grad()\n",
    "        return avg_loss\n",
    "\n",
    "    def train_epoch(self, datasets, epoch):\n",
    "        \"\"\"Train over one pass of the dataset, logging loss periodically.\"\"\"\n",
    "        self.model.train()\n",
    "        for batch_id, data in enumerate(datasets()):\n",
    "            loss = self.train_step(data)\n",
    "            # Report the current loss every 500 batches\n",
    "            if batch_id % 500 == 0:\n",
    "                print(\"epoch_id: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, loss.numpy()))\n",
    "\n",
    "    def train(self, train_datasets, val_datasets, epochs):\n",
    "        \"\"\"Full loop: train each epoch, report accuracies, then save weights.\"\"\"\n",
    "        for i in range(epochs):\n",
    "            self.train_epoch(train_datasets, i)\n",
    "            train_acc = self.val_epoch(train_datasets)\n",
    "            val_acc = self.val_epoch(val_datasets)\n",
    "            print(\"epoch_id: {}, train acc is: {}, val acc is {}\".format(i, train_acc, val_acc))\n",
    "        self.save()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "epochs = 10\n",
    "# Learning rate for SGD\n",
    "lr = 0.1\n",
    "model_path = './mnist.pdparams'\n",
    "\n",
    "# Training data: shuffled, read with multiple worker processes\n",
    "train_dataset = MnistDataset(mode='train')\n",
    "train_loader = paddle.io.DataLoader(train_dataset,\n",
    "                                    batch_size=32,\n",
    "                                    shuffle=True,\n",
    "                                    num_workers=10)\n",
    "\n",
    "# Validation data: larger batches, no shuffling needed\n",
    "val_dataset = MnistDataset(mode='val')\n",
    "val_loader = paddle.io.DataLoader(val_dataset, batch_size=128)\n",
    "\n",
    "model = MNIST()\n",
    "opt = paddle.optimizer.SGD(learning_rate=lr, parameters=model.parameters())\n",
    "\n",
    "# Wire everything into the trainer and run\n",
    "trainer = Trainer(model_path=model_path, model=model, optimizer=opt)\n",
    "\n",
    "trainer.train(train_datasets=train_loader, val_datasets=val_loader, epochs=epochs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "通过将学习率从 0.1 增大至 1.0 之后，发现模型在验证集上的准确率由之前的 85.03% 提升至 95.87%，由此可见我们之前猜测“学习率过小”的想法是正确的。\n",
    "\n",
    "通过将激活函数替换为 relu，模型在验证集上的准确率由之前的 85.03% 提升至 96.18%，由此可见 relu 在一定程度上要优于 sigmoid 激活函数。\n",
    "\n",
    "通过在单层前馈神经网路中添加一层隐藏层，模型在验证集上的准确率由之前的 96.18% 提升至 97.10%，模型的精度得到了进一步的提升。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 正则化\n",
    "为了防止模型过拟合，在没有扩充样本量的可能下，只能降低模型的复杂度，可以通过限制参数的数量或可能取值（参数值尽量小）实现。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Two-hidden-layer feed-forward network for MNIST (784 -> 512 -> 256 -> 10)\n",
    "class MultiMNIST(paddle.nn.Layer):\n",
    "    def __init__(self):\n",
    "        super(MultiMNIST, self).__init__()\n",
    "        self.fc1 = paddle.nn.Linear(in_features=784, out_features=512)\n",
    "        # The same ReLU module is reused after fc1 and fc2 (it is stateless)\n",
    "        self.act = paddle.nn.ReLU()\n",
    "        self.fc2 = paddle.nn.Linear(in_features=512, out_features=256)\n",
    "        self.fc3 = paddle.nn.Linear(in_features=256, out_features=10)\n",
    "        # NOTE(review): forward() emits Softmax probabilities; make sure the\n",
    "        # training loss does not apply softmax a second time.\n",
    "        self.softmax = paddle.nn.Softmax()\n",
    "\n",
    "    # Forward pass: returns per-class probabilities\n",
    "    def forward(self, x):\n",
    "        h = self.act(self.fc1(x))\n",
    "        h = self.act(self.fc2(h))\n",
    "        return self.softmax(self.fc3(h))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "epochs = 10\n",
    "lr = 0.1\n",
    "use_gpu = False\n",
    "model_path = './mnist.pdparams'\n",
    "\n",
    "# Pick the compute device before building loaders and the model\n",
    "if use_gpu:\n",
    "    paddle.set_device('gpu:0')\n",
    "else:\n",
    "    paddle.set_device('cpu')\n",
    "\n",
    "train_dataset = MnistDataset(mode='train')\n",
    "train_loader = paddle.io.DataLoader(train_dataset,\n",
    "                                    batch_size=32,\n",
    "                                    shuffle=True,\n",
    "                                    num_workers=16)\n",
    "\n",
    "val_dataset = MnistDataset(mode='val')\n",
    "val_loader = paddle.io.DataLoader(val_dataset, batch_size=128)\n",
    "\n",
    "model = MultiMNIST()\n",
    "# SGD with L2 weight decay — the regularization under study here\n",
    "opt = paddle.optimizer.SGD(learning_rate=lr,\n",
    "                           weight_decay=paddle.regularizer.L2Decay(coeff=5e-4),\n",
    "                           parameters=model.parameters())\n",
    "\n",
    "trainer = Trainer(model_path=model_path, model=model, optimizer=opt)\n",
    "\n",
    "trainer.train(train_datasets=train_loader, val_datasets=val_loader, epochs=epochs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import paddle\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "\n",
    "class DigitRecognizer:\n",
    "    \"\"\"Loads a trained MultiMNIST model and predicts digits from images.\"\"\"\n",
    "\n",
    "    def __init__(self, model_path):\n",
    "        \"\"\"\n",
    "        Initialize the recognizer.\n",
    "        Args:\n",
    "            model_path: path to the saved .pdparams weight file\n",
    "        \"\"\"\n",
    "        # Use whatever device paddle currently reports\n",
    "        self.device = paddle.device.get_device()\n",
    "        paddle.device.set_device(self.device)\n",
    "\n",
    "        # Load the trained model\n",
    "        self.model = self._load_model(model_path)\n",
    "\n",
    "    def _load_model(self, model_path):\n",
    "        \"\"\"Build the network, load its weights and switch to eval mode.\"\"\"\n",
    "        try:\n",
    "            model = MultiMNIST()\n",
    "            # FIX: set_state_dict is the current API; load_dict is a\n",
    "            # deprecated alias for the same operation.\n",
    "            model.set_state_dict(paddle.load(model_path))\n",
    "            model.eval()\n",
    "            return model\n",
    "        except Exception as e:\n",
    "            raise Exception(f\"模型加载失败: {str(e)}\")\n",
    "\n",
    "    def preprocess_image(self, image):\n",
    "        \"\"\"\n",
    "        Preprocess an input image into a model-ready tensor.\n",
    "        Args:\n",
    "            image: PIL Image object or numpy array\n",
    "        Returns:\n",
    "            paddle tensor of shape (1, 784), values scaled to [0, 1]\n",
    "        \"\"\"\n",
    "        if isinstance(image, np.ndarray):\n",
    "            image = Image.fromarray(image)\n",
    "\n",
    "        # Convert to grayscale\n",
    "        if image.mode != 'L':\n",
    "            image = image.convert('L')\n",
    "\n",
    "        # Resize to the 28x28 input the network was trained on\n",
    "        image = image.resize((28, 28))\n",
    "\n",
    "        # Convert to a float array and normalize to [0, 1]\n",
    "        img_array = np.array(image).astype('float32')\n",
    "        img_array = img_array / 255.0\n",
    "        # img_array = img_array/127.5 - 1\n",
    "\n",
    "        # Flatten and wrap as a paddle tensor\n",
    "        img_tensor = paddle.to_tensor(img_array.reshape(1, 784))\n",
    "        return img_tensor\n",
    "\n",
    "    def predict(self, image):\n",
    "        \"\"\"\n",
    "        Recognize the digit in an image.\n",
    "        Args:\n",
    "            image: input image (PIL Image object or numpy array)\n",
    "        Returns:\n",
    "            (predicted digit, probability) as (int, float),\n",
    "            or (None, None) if prediction fails\n",
    "        \"\"\"\n",
    "        try:\n",
    "            # Preprocess the image\n",
    "            tensor = self.preprocess_image(image)\n",
    "\n",
    "            # Inference only — no gradients needed\n",
    "            with paddle.no_grad():\n",
    "                output = self.model(tensor)\n",
    "\n",
    "            # The model outputs Softmax probabilities for the 10 classes\n",
    "            probs = output.numpy()[0]\n",
    "            predicted_digit = np.argmax(probs)  # index of the top class\n",
    "            confidence = probs[predicted_digit]  # its probability\n",
    "\n",
    "            return int(predicted_digit), float(confidence)\n",
    "\n",
    "        except Exception as e:\n",
    "            # Best-effort API: report the failure, return sentinel values\n",
    "            print(f\"预测失败: {str(e)}\")\n",
    "            return None, None\n",
    "\n",
    "def main():\n",
    "    \"\"\"Demo entry point: load the model and classify a sample image.\"\"\"\n",
    "    # Path to the trained weights\n",
    "    MODEL_PATH = './mnist.pdparams'\n",
    "\n",
    "    try:\n",
    "        # Create the recognizer\n",
    "        recognizer = DigitRecognizer(MODEL_PATH)\n",
    "\n",
    "        # Example: read a test image\n",
    "        image_path = \"./datasets/example_0.png\"  # replace with your own test image path\n",
    "        image = Image.open(image_path)\n",
    "\n",
    "        # Predict\n",
    "        digit, probability = recognizer.predict(image)\n",
    "        if digit is not None:\n",
    "            print(f\"预测的数字是: {digit}\")\n",
    "            print(f\"预测的概率是: {probability:.4f}\")\n",
    "\n",
    "        # Batch prediction example\n",
    "        def batch_predict(image_paths):\n",
    "            results = []\n",
    "            for path in image_paths:\n",
    "                image = Image.open(path)\n",
    "                digit, prob = recognizer.predict(image)\n",
    "                results.append((path, digit, prob))\n",
    "            return results\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f\"程序运行出错: {str(e)}\")\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "paddle_cuda112",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
