{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:38.908.776 [mindspore/_check_version.py:207] MindSpore version 1.1.1 and \"te\" wheel package version 1.0 does not match, reference to the match info on: https://www.mindspore.cn/install\n",
      "MindSpore version 1.1.1 and \"topi\" wheel package version 0.6.0 does not match, reference to the match info on: https://www.mindspore.cn/install\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:39.448.087 [mindspore/ops/operations/array_ops.py:2302] WARN_DEPRECATED: The usage of Pack is deprecated. Please use Stack.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING: 'ControlDepend' is deprecated from version 1.1 and will be removed in a future version, use 'Depend' instead.\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import argparse\n",
    "import random\n",
    "import time\n",
    "import numpy as np\n",
    "# import moxing as mox\n",
    "\n",
    "from mindspore import context\n",
    "from mindspore import Tensor\n",
    "from mindspore.nn.optim.momentum import Momentum\n",
    "from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\n",
    "from mindspore.train.model import Model\n",
    "from mindspore.context import ParallelMode\n",
    "from mindspore.train.callback import Callback, LossMonitor\n",
    "from mindspore.train.loss_scale_manager import FixedLossScaleManager\n",
    "from mindspore.communication.management import init\n",
    "import mindspore.dataset.engine as de\n",
    "\n",
    "from dataset import create_dataset\n",
    "from resnet import resnet50\n",
    "\n",
     "# Fix RNG seeds for Python, NumPy and the MindSpore dataset engine so runs repeat.\n",
     "random.seed(1)\n",
     "np.random.seed(1)\n",
     "de.config.set_seed(1)\n",
     "# Module-level device count; read by PerformanceCallback when reporting throughput.\n",
     "device_num = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "class PerformanceCallback(Callback):\n",
     "    \"\"\"\n",
     "    Training performance callback.\n",
     "\n",
     "    After each step callback it prints elapsed time, per-step time and\n",
     "    samples/second. NOTE(review): the printout labels the interval as an\n",
     "    epoch; presumably one step callback fires per epoch under dataset sink\n",
     "    mode — confirm against the training configuration.\n",
     "\n",
     "    Args:\n",
     "        batch_size (int): Batch number for one step.\n",
     "    \"\"\"\n",
     "    def __init__(self, batch_size):\n",
     "        super(PerformanceCallback, self).__init__()\n",
     "        self.batch_size = batch_size\n",
     "        # Global step count at the previous report; 0 before training starts.\n",
     "        self.last_step = 0\n",
     "        # Wall-clock timestamp recorded in step_begin.\n",
     "        self.epoch_begin_time = 0\n",
     "\n",
     "    def step_begin(self, run_context):\n",
     "        \"\"\"Record the wall-clock start time of the upcoming interval.\"\"\"\n",
     "        self.epoch_begin_time = time.time()\n",
     "\n",
     "    def step_end(self, run_context):\n",
     "        \"\"\"Report timing and cluster throughput for the steps since the last report.\"\"\"\n",
     "        params = run_context.original_args()\n",
     "        cost_time = time.time() - self.epoch_begin_time\n",
     "        # Steps covered by this interval (delta of the global step counter).\n",
     "        train_steps = params.cur_step_num -self.last_step\n",
     "        print(f'epoch {params.cur_epoch_num} cost time = {cost_time}, train step num: {train_steps}, '\n",
     "              f'one step time: {1000*cost_time/train_steps} ms, '\n",
     "              f'train samples per second of cluster: {device_num*train_steps*self.batch_size/cost_time:.1f}\\n')\n",
     "        self.last_step = run_context.original_args().cur_step_num\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_lr(global_step,\n",
    "           total_epochs,\n",
    "           steps_per_epoch,\n",
    "           lr_init=0.01,\n",
    "           lr_max=0.1,\n",
    "           warmup_epochs=5):\n",
    "    \"\"\"\n",
    "    Generate learning rate array.\n",
    "\n",
    "    Args:\n",
    "        global_step (int): Initial step of training.\n",
    "        total_epochs (int): Total epoch of training.\n",
    "        steps_per_epoch (float): Steps of one epoch.\n",
    "        lr_init (float): Initial learning rate. Default: 0.01.\n",
    "        lr_max (float): Maximum learning rate. Default: 0.1.\n",
    "        warmup_epochs (int): The number of warming up epochs. Default: 5.\n",
    "\n",
    "    Returns:\n",
    "        np.array, learning rate array.\n",
    "    \"\"\"\n",
    "    lr_each_step = []\n",
    "    total_steps = steps_per_epoch * total_epochs\n",
    "    warmup_steps = steps_per_epoch * warmup_epochs\n",
    "    if warmup_steps != 0:\n",
    "        inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps)\n",
    "    else:\n",
    "        inc_each_step = 0\n",
    "    for i in range(int(total_steps)):\n",
    "        if i < warmup_steps:\n",
    "            lr = float(lr_init) + inc_each_step * float(i)\n",
    "        else:\n",
    "            base = ( 1.0 - (float(i) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps)) )\n",
    "            lr = float(lr_max) * base * base\n",
    "            if lr < 0.0:\n",
    "                lr = 0.0\n",
    "        lr_each_step.append(lr)\n",
    "\n",
    "    current_step = global_step\n",
    "    lr_each_step = np.array(lr_each_step).astype(np.float32)\n",
    "    learning_rate = lr_each_step[current_step:]\n",
    "\n",
    "    return learning_rate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "def resnet50_train():\n",
     "    \"\"\"Train ResNet50 on Cifar-10, then evaluate accuracy on the eval split.\n",
     "\n",
     "    Uses MindSpore graph mode on Ascend with O2 mixed precision and a fixed\n",
     "    loss scale. All hyper-parameters are hard-coded below; point\n",
     "    local_data_path at your Cifar-10 binary dataset before running.\n",
     "    \"\"\"\n",
     "    device_id = 0\n",
     "    # NOTE(review): this local device_num shadows the module-level device_num\n",
     "    # read by PerformanceCallback; both are 1 here — keep them in sync if changed.\n",
     "    device_num = 1\n",
     "    epoch_size = 90\n",
     "    batch_size = 32\n",
     "    class_num = 10\n",
     "    # Shared by the Momentum optimizer and the FixedLossScaleManager below.\n",
     "    loss_scale_num = 1024\n",
     "    local_data_path = '/home/share/dataset/cifar-10-batches-bin/' # your cifar10 path\n",
     "\n",
     "    # set graph mode and parallel mode\n",
     "    context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", save_graphs=False, device_id=device_id)\n",
     "\n",
     "    # Multi-device run: enable data parallelism and use a per-device data path.\n",
     "    if device_num > 1:\n",
     "        context.set_auto_parallel_context(device_num=device_num,\n",
     "                                          parallel_mode=ParallelMode.DATA_PARALLEL,\n",
     "                                          gradients_mean=True)\n",
     "        init()\n",
     "        local_data_path = os.path.join(local_data_path, str(device_id))\n",
     "\n",
     "    # data download (moxing copy disabled; only needed on ModelArts)\n",
     "    print('Download data.')\n",
     "#     mox.file.copy_parallel(src_url=args_opt.data_url, dst_url=local_data_path)\n",
     "\n",
     "    # create dataset\n",
     "    print('Create train and evaluate dataset.')\n",
     "    train_dataset = create_dataset(dataset_path=local_data_path, do_train=True,\n",
     "                                   repeat_num=1, batch_size=batch_size)\n",
     "    eval_dataset = create_dataset(dataset_path=local_data_path, do_train=False,\n",
     "                                   repeat_num=1, batch_size=batch_size)\n",
     "    train_step_size = train_dataset.get_dataset_size()\n",
     "    print('Create dataset success.')\n",
     "\n",
     "    # create model\n",
     "    net = resnet50(class_num = class_num)\n",
     "    # reduction='mean' means that apply reduction of mean to loss\n",
     "    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
     "    # Warmup + quadratic-decay schedule produced by get_lr above.\n",
     "    lr = Tensor(get_lr(global_step=0, total_epochs=epoch_size, steps_per_epoch=train_step_size))\n",
     "    opt = Momentum(net.trainable_params(), lr, momentum=0.9, weight_decay=1e-4, loss_scale=loss_scale_num)\n",
     "    loss_scale = FixedLossScaleManager(loss_scale_num, False)\n",
     "\n",
     "    # amp_level=\"O2\" means that the hybrid precision of O2 mode is used for training\n",
     "    # the whole network except that batchnorm will be cast into float16 format and dynamic loss scale will be used\n",
     "    # 'keep_batchnorm_fp32 = False' means that use the float16 format\n",
     "    model = Model(net, amp_level=\"O2\", keep_batchnorm_fp32=False, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'})\n",
     "\n",
     "    # define performance callback to show ips and loss callback to show loss for every epoch\n",
     "    performance_cb = PerformanceCallback(batch_size)\n",
     "    loss_cb = LossMonitor()\n",
     "    cb = [performance_cb, loss_cb]\n",
     "\n",
     "    print(f'Start run training, total epoch: {epoch_size}.')\n",
     "    model.train(epoch_size, train_dataset, callbacks=cb)\n",
     "    \n",
     "    # Evaluate on device 0 only (or in the single-device case).\n",
     "    if device_num == 1 or device_id == 0:\n",
     "        print(f'=================================Start run evaluation.=================================')\n",
     "        output = model.eval(eval_dataset)\n",
     "        print(f'Evaluation result: {output}.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Download data.\n",
      "Create train and evaluate dataset.\n",
      "Create dataset success.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.589.791 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.615.008 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.640.383 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.693.303 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.732.094 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.770.718 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.809.566 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:40.938.391 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:41.317.61 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:41.124.799 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:41.218.374 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:41.312.420 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:41.405.338 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:41.831.187 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:42.142.517 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n",
      "[WARNING] ME(129552:281472913131056,MainProcess):2021-03-12-15:02:42.464.974 [mindspore/ops/operations/math_ops.py:171] WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start run training, total epoch: 20.\n",
      "epoch 1 cost time = 129.5071153640747, train step num: 1562, one step time: 82.91108538032952 ms, train samples per second of cluster: 386.0\n",
      "\n",
      "epoch: 1 step: 1562, loss is 1.4099786\n",
      "epoch 2 cost time = 26.813992261886597, train step num: 1562, one step time: 17.166448311066965 ms, train samples per second of cluster: 1864.1\n",
      "\n",
      "epoch: 2 step: 1562, loss is 1.7900116\n",
      "epoch 3 cost time = 26.816537618637085, train step num: 1562, one step time: 17.168077860843205 ms, train samples per second of cluster: 1863.9\n",
      "\n",
      "epoch: 3 step: 1562, loss is 1.2243073\n",
      "epoch 4 cost time = 26.817380666732788, train step num: 1562, one step time: 17.168617584335973 ms, train samples per second of cluster: 1863.9\n",
      "\n",
      "epoch: 4 step: 1562, loss is 1.0838339\n",
      "epoch 5 cost time = 26.81809091567993, train step num: 1562, one step time: 17.16907228916769 ms, train samples per second of cluster: 1863.8\n",
      "\n",
      "epoch: 5 step: 1562, loss is 0.62525547\n",
      "epoch 6 cost time = 26.817250967025757, train step num: 1562, one step time: 17.16853454995247 ms, train samples per second of cluster: 1863.9\n",
      "\n",
      "epoch: 6 step: 1562, loss is 0.74205196\n",
      "epoch 7 cost time = 26.818840980529785, train step num: 1562, one step time: 17.169552484334048 ms, train samples per second of cluster: 1863.8\n",
      "\n",
      "epoch: 7 step: 1562, loss is 0.6567188\n",
      "epoch 8 cost time = 26.817502737045288, train step num: 1562, one step time: 17.168695734343974 ms, train samples per second of cluster: 1863.9\n",
      "\n",
      "epoch: 8 step: 1562, loss is 0.827834\n",
      "epoch 9 cost time = 26.817890167236328, train step num: 1562, one step time: 17.16894376903734 ms, train samples per second of cluster: 1863.8\n",
      "\n",
      "epoch: 9 step: 1562, loss is 0.3997241\n",
      "epoch 10 cost time = 26.81843328475952, train step num: 1562, one step time: 17.16929147551826 ms, train samples per second of cluster: 1863.8\n",
      "\n",
      "epoch: 10 step: 1562, loss is 0.4481091\n",
      "epoch 11 cost time = 26.8180935382843, train step num: 1562, one step time: 17.16907396817177 ms, train samples per second of cluster: 1863.8\n",
      "\n",
      "epoch: 11 step: 1562, loss is 0.33693692\n",
      "epoch 12 cost time = 26.819440126419067, train step num: 1562, one step time: 17.169936060447547 ms, train samples per second of cluster: 1863.7\n",
      "\n",
      "epoch: 12 step: 1562, loss is 0.15339586\n",
      "epoch 13 cost time = 26.819883584976196, train step num: 1562, one step time: 17.170219964773494 ms, train samples per second of cluster: 1863.7\n",
      "\n",
      "epoch: 13 step: 1562, loss is 0.34012866\n",
      "epoch 14 cost time = 26.821850061416626, train step num: 1562, one step time: 17.17147891255866 ms, train samples per second of cluster: 1863.6\n",
      "\n",
      "epoch: 14 step: 1562, loss is 0.270985\n",
      "epoch 15 cost time = 26.820826292037964, train step num: 1562, one step time: 17.17082349042123 ms, train samples per second of cluster: 1863.6\n",
      "\n",
      "epoch: 15 step: 1562, loss is 0.70893455\n",
      "epoch 16 cost time = 26.82073426246643, train step num: 1562, one step time: 17.17076457264176 ms, train samples per second of cluster: 1863.6\n",
      "\n",
      "epoch: 16 step: 1562, loss is 0.23259175\n",
      "epoch 17 cost time = 26.821929216384888, train step num: 1562, one step time: 17.171529587954474 ms, train samples per second of cluster: 1863.5\n",
      "\n",
      "epoch: 17 step: 1562, loss is 0.1518214\n",
      "epoch 18 cost time = 26.82370948791504, train step num: 1562, one step time: 17.172669326450087 ms, train samples per second of cluster: 1863.4\n",
      "\n",
      "epoch: 18 step: 1562, loss is 0.019834736\n",
      "epoch 19 cost time = 26.824481964111328, train step num: 1562, one step time: 17.17316386946948 ms, train samples per second of cluster: 1863.4\n",
      "\n",
      "epoch: 19 step: 1562, loss is 0.05388867\n",
      "epoch 20 cost time = 26.822935581207275, train step num: 1562, one step time: 17.17217386761029 ms, train samples per second of cluster: 1863.5\n",
      "\n",
      "epoch: 20 step: 1562, loss is 0.054800615\n",
      "Start run evaluation.\n",
      "Evaluation result: {'acc': 0.9032451923076923}.\n",
      "ResNet50 training success!\n"
     ]
    }
   ],
   "source": [
     "# Run the full train + evaluate pipeline; the banner prints only if no exception was raised.\n",
     "resnet50_train()\n",
     "print('ResNet50 training success!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
