{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a373a751",
   "metadata": {},
   "source": [
    "# 自定义模型训练\n",
    "由于MindSpore是函数式编程，在执行训练前需要定义一个`损失网络`将前向网络和损失函数连接起来，其需继承`nn.Cell`。\n",
    "\n",
    "创建一个模型训练的类，与前面内容类似，继承`nn.TrainOneStepCell`这个基类，重写`__init__`和`construct`方法，在`__init__`方法中进行参数初始化，在`construct`方法中构建训练过程。\n",
    "\n",
    "为了方便后续训练，我们先把上几次的代码拷贝过来。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "ccadd8c9",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[WARNING] ME(40608:40188,MainProcess):2022-10-16-10:11:43.145.354 [mindspore\\dataset\\engine\\datasets_user_defined.py:656] Python multiprocessing is not supported on Windows platform.\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import mindspore.dataset as ds\n",
    "import mindspore.nn as nn\n",
    "import mindspore.ops as ops\n",
    "import mindspore as ms\n",
    "\n",
    "\n",
    "def create_data(num, w=2.0, b=3.0):\n",
    "    for _ in range(num):\n",
    "        # 使用uniform均匀分布数据\n",
    "        x = np.random.uniform(-10.0, 10.0)\n",
    "        # 指定一个噪声来模拟数据\n",
    "        # 使用normal使其满足高斯分布,参数为数据的中间值和浮动值\n",
    "        noise = np.random.normal(0, 1)\n",
    "        y = w * x + b + noise\n",
    "        # 使用yield返回数据生成器，方便后续对数据进行处理\n",
    "        yield np.array([x]).astype(np.float32), np.array([y]).astype(np.float32)\n",
    "\n",
    "\n",
    "def create_dataset(data_num, batch_size, repeat_num):\n",
    "    dataset = ds.GeneratorDataset(list(create_data(data_num)), column_names=['data', 'label'])\n",
    "    dataset = dataset.batch(batch_size, drop_remainder=True)\n",
    "    dataset = dataset.repeat(repeat_num)\n",
    "    return dataset\n",
    "\n",
    "\n",
    "# 创建训练数据集\n",
    "train_dataset = create_dataset(data_num=1000, batch_size=10, repeat_num=1)\n",
    "\n",
    "\n",
    "class MyNet(nn.Cell):\n",
    "    \"\"\"定义网络模型\"\"\"\n",
    "    def __init__(self):\n",
    "        super(MyNet, self).__init__()\n",
    "        self.layer = nn.Dense(1, 1)\n",
    "\n",
    "    def construct(self, x):\n",
    "        fx = self.layer(x)\n",
    "        return fx\n",
    "\n",
    "\n",
    "# 初始化网络模型\n",
    "net = MyNet()\n",
    "\n",
    "# 获取可训练参数w和b\n",
    "model_params = net.trainable_params()\n",
    "\n",
    "\n",
    "class MyLossFunction(nn.LossBase):\n",
    "    \"\"\"定义损失函数\"\"\"\n",
    "    def __init__(self):\n",
    "        super(MyLossFunction, self).__init__()\n",
    "        self.abs = ops.Abs()\n",
    "\n",
    "    def construct(self, predict, target):\n",
    "        x = self.abs(predict - target)\n",
    "        return self.get_loss(x)\n",
    "\n",
    "\n",
    "class MyOptimizer(nn.Optimizer):\n",
    "    \"\"\"自定义优化器\"\"\"\n",
    "    def __init__(self, learning_rate, params, momentum=0.9):\n",
    "        super(MyOptimizer, self).__init__(learning_rate, params)\n",
    "        self.assign = ops.Assign()\n",
    "        # Momentum coefficient (scalar hyperparameter, default 0.9)\n",
    "        self.moment = ms.Parameter(ms.Tensor(momentum, ms.float32), name=\"moment\")\n",
    "        # Velocity buffers, one per trainable parameter, initialized to zeros\n",
    "        self.momentum = self.parameters.clone(prefix=\"momentum\", init=\"zeros\")\n",
    "\n",
    "    def construct(self, gradients):\n",
    "        lr = self.get_lr()\n",
    "        params = self.parameters\n",
    "        for i in range(len(params)):\n",
    "            # Update the velocity: v = v * momentum_coefficient + gradient\n",
    "            self.assign(self.momentum[i], self.momentum[i] * self.moment + gradients[i])\n",
    "            # Compute the new weight: w = w - v * lr\n",
    "            update = params[i] - self.momentum[i] * lr\n",
    "            # Write the updated weight back into the parameter\n",
    "            self.assign(params[i], update)\n",
    "        return params"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cdd1b780",
   "metadata": {},
   "source": [
    "## 一、定义损失网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "36cf07d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import mindspore.nn as nn\n",
    "\n",
    "class MyLossNet(nn.Cell):\n",
    "    \"\"\"定义损失网络\"\"\"\n",
    "    def __init__(self, backbone, loss_fn): # backbone翻译为主干网络\n",
    "        super(MyLossNet, self).__init__(auto_prefix=False)\n",
    "        self.backbone = backbone\n",
    "        self.loss_fn = loss_fn\n",
    "    \n",
    "    def construct(self, data, label):\n",
    "        # 将神经网络的输出输入到损失函数，返回损失值\n",
    "        out = self.backbone(data)\n",
    "        return self.loss_fn(out, label)\n",
    "        \n",
    "    def backbone_net(self):\n",
    "        return self.backbone"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d367b40b",
   "metadata": {},
   "source": [
    "`auto_prefix`表示是否自动为`Cell`及其子`Cell`生成`NameSpace`。`auto_prefix`的设置影响网络参数的命名：如果设置为`True`，则自动给网络参数的名称添加前缀，否则不添加前缀。默认为`True`。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26430825",
   "metadata": {},
   "source": [
    "## 二、定义模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "328ce7d2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss: 8.775998\n",
      "Loss: 11.266728\n",
      "Loss: 9.77487\n",
      "Loss: 8.197581\n",
      "Loss: 7.004673\n",
      "Loss: 5.10197\n",
      "Loss: 5.629426\n",
      "Loss: 4.505775\n",
      "Loss: 2.0569727\n",
      "Loss: 2.834942\n",
      "Loss: 2.1460726\n",
      "Loss: 2.769179\n",
      "Loss: 2.7972474\n",
      "Loss: 4.417815\n",
      "Loss: 3.36381\n",
      "Loss: 5.926586\n",
      "Loss: 5.9554543\n",
      "Loss: 3.1054041\n",
      "Loss: 3.3922439\n",
      "Loss: 1.9679273\n",
      "Loss: 2.9817102\n",
      "Loss: 3.0976062\n",
      "Loss: 2.613661\n",
      "Loss: 2.9405441\n",
      "Loss: 2.3339572\n",
      "Loss: 2.3139713\n",
      "Loss: 1.9559338\n",
      "Loss: 2.2220104\n",
      "Loss: 2.2550616\n",
      "Loss: 3.202043\n",
      "Loss: 2.2262745\n",
      "Loss: 2.1477032\n",
      "Loss: 1.4579711\n",
      "Loss: 1.8045273\n",
      "Loss: 1.599718\n",
      "Loss: 1.7005503\n",
      "Loss: 1.518559\n",
      "Loss: 1.6469538\n",
      "Loss: 1.5198668\n",
      "Loss: 1.5855554\n",
      "Loss: 1.710009\n",
      "Loss: 1.2763832\n",
      "Loss: 1.3621738\n",
      "Loss: 0.98795384\n",
      "Loss: 0.93451226\n",
      "Loss: 1.3417166\n",
      "Loss: 0.96811485\n",
      "Loss: 0.64455163\n",
      "Loss: 0.80310315\n",
      "Loss: 0.93038976\n",
      "Loss: 0.8279886\n",
      "Loss: 0.8895013\n",
      "Loss: 0.9875407\n",
      "Loss: 0.71852255\n",
      "Loss: 0.65988207\n",
      "Loss: 0.92205775\n",
      "Loss: 0.6753975\n",
      "Loss: 0.59050506\n",
      "Loss: 1.1801149\n",
      "Loss: 0.43735498\n",
      "Loss: 1.0970823\n",
      "Loss: 0.69986457\n",
      "Loss: 0.8013399\n",
      "Loss: 1.1662219\n",
      "Loss: 0.68832904\n",
      "Loss: 0.74910784\n",
      "Loss: 0.84765303\n",
      "Loss: 0.7951411\n",
      "Loss: 0.78718305\n",
      "Loss: 0.6073128\n",
      "Loss: 0.94998276\n",
      "Loss: 0.92438155\n",
      "Loss: 0.860853\n",
      "Loss: 0.90258807\n",
      "Loss: 0.78609705\n",
      "Loss: 0.60829985\n",
      "Loss: 0.9312518\n",
      "Loss: 0.9101264\n",
      "Loss: 1.1911274\n",
      "Loss: 0.45261717\n",
      "Loss: 0.58300734\n",
      "Loss: 1.1171668\n",
      "Loss: 0.561353\n",
      "Loss: 0.77365196\n",
      "Loss: 0.92555964\n",
      "Loss: 0.9905758\n",
      "Loss: 0.66352576\n",
      "Loss: 1.0742066\n",
      "Loss: 1.0005906\n",
      "Loss: 0.6794155\n",
      "Loss: 0.53892076\n",
      "Loss: 0.6074486\n",
      "Loss: 0.9797141\n",
      "Loss: 0.7400064\n",
      "Loss: 0.7431849\n",
      "Loss: 1.0282872\n",
      "Loss: 0.8820325\n",
      "Loss: 0.8083378\n",
      "Loss: 0.8264815\n",
      "Loss: 0.9432265\n"
     ]
    }
   ],
   "source": [
    "import mindspore.ops as ops\n",
    "\n",
    "class MyTrainStep(nn.TrainOneStepCell):\n",
    "    \"\"\"定义模型训练过程\"\"\"\n",
    "    def __init__(self, network, optimizer):\n",
    "        # 初始化参数\n",
    "        super(MyTrainStep, self).__init__(network, optimizer)\n",
    "        self.grad = ops.GradOperation(get_by_list=True)\n",
    "        \n",
    "    def construct(self, data, label):\n",
    "        # 构建训练过程\n",
    "        weights = self.weights\n",
    "        loss = self.network(data, label)\n",
    "        grads = self.grad(self.network, weights)(data, label)\n",
    "        return loss, self.optimizer(grads)\n",
    "    \n",
    "# 执行训练\n",
    "loss_func = MyLossFunction()\n",
    "optim = MyOptimizer(0.01, net.trainable_params())\n",
    "loss_net = MyLossNet(net, loss_func)\n",
    "train_net = MyTrainStep(loss_net, optim)\n",
    "\n",
    "for data in train_dataset.create_dict_iterator():\n",
    "    # One training step: forward, backward, and parameter update.\n",
    "    train_net(data['data'], data['label'])\n",
    "    # Re-run the forward pass to report the post-update loss.\n",
    "    # NOTE: train_net already returns the pre-update loss; this extra\n",
    "    # forward pass is only for display and doubles the forward cost.\n",
    "    loss = loss_net(data['data'], data['label'])\n",
    "    print(f\"Loss: {loss}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e4254f6c",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "mindspore",
   "language": "python",
   "name": "mindvision"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
