{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Import Some Packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import paddle\n",
    "from paddle.io import Dataset, DataLoader\n",
    "import paddle.nn as nn\n",
    "import paddle.nn.functional as F\n",
    "# import paddle.fluid as fluid\n",
    "import paddle.metric as metric\n",
    "import numpy as np\n",
    "import gc\n",
    "# For plotting\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Preparing Data\n",
    "Load the training and testing data from the .npy file (NumPy array)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading data ...\n",
      "Size of training data: (860952, 429)\n",
      "Size of testing data: (184490, 429)\n"
     ]
    }
   ],
   "source": [
    "print('Loading data ...')\n",
    "\n",
    "# Root folder that holds the pre-extracted feature/label arrays (.npy files).\n",
    "data_root='data/'\n",
    "train = np.load(data_root + 'train_x.npy')\n",
    "train_label = np.load(data_root + 'train_y.npy')\n",
    "test = np.load(data_root + 'test_x.npy')\n",
    "test_label = np.load(data_root + 'test_y.npy')\n",
    "\n",
    "print('Size of training data: {}'.format(train.shape))\n",
    "print('Size of testing data: {}'.format(test.shape))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TIMITDataset(Dataset):\n",
    "    \"\"\"Wrap feature (and optional label) arrays as a paddle.io.Dataset.\n",
    "\n",
    "    Args:\n",
    "        X: 2-D array of frame features; stored as a float32 tensor.\n",
    "        y: optional 1-D array of integer class labels; pass None for\n",
    "            unlabeled (test-time) data.\n",
    "    \"\"\"\n",
    "    def __init__(self, X, y=None):\n",
    "        self.data = paddle.to_tensor(X,dtype=\"float32\")\n",
    "        if y is not None:\n",
    "            # CrossEntropyLoss expects int64 labels.\n",
    "            y = y.astype(np.int64)\n",
    "            self.label = paddle.to_tensor(y)\n",
    "        else:\n",
    "            self.label = None\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        \"\"\"Return (feature, label); label is -1 when the dataset is unlabeled.\"\"\"\n",
    "        if self.label is not None:\n",
    "            return self.data[idx], self.label[idx]\n",
    "        else:\n",
    "            # Placeholder label keeps the (feature, label) pair shape uniform.\n",
    "            return self.data[idx], -1\n",
    "\n",
    "    def __len__(self):\n",
    "        \"\"\"Number of frames in the dataset.\"\"\"\n",
    "        return len(self.data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Split the labeled data into a training set and a validation set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Size of training set: (602666, 429)\n",
      "Size of validation set: (258286, 429)\n"
     ]
    }
   ],
   "source": [
    "# TODO:  you can modify the variable `VAL_RATIO` to change the ratio of validation data.\n",
    "VAL_RATIO = 0.3\n",
    "\n",
    "percent = int(train.shape[0] * (1 - VAL_RATIO))\n",
    "train_x, train_y, val_x, val_y = train[:percent], train_label[:percent], train[percent:], train_label[percent:]\n",
    "print('Size of training set: {}'.format(train_x.shape))\n",
    "print('Size of validation set: {}'.format(val_x.shape))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create a data loader from the dataset, feel free to tweak the variable BATCH_SIZE here."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# TODO: you can tune the hyperparameter BATCH_SIZE here\n",
    "BATCH_SIZE = 64\n",
    "\n",
    "train_set = TIMITDataset(train_x, train_y)\n",
    "val_set = TIMITDataset(val_x, val_y)\n",
    "\n",
    "# No shuffling for validation: order does not matter when only evaluating.\n",
    "train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True) # only shuffle the training data\n",
    "val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Clean up the unneeded variables to save memory.\n",
    "\n",
    "Note: if you need these variables later, remove this block (or defer the cleanup until after their last use).\n",
    "The data size is quite large, so be aware of memory usage in Colab."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1150"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "# The Dataset objects above hold tensor copies of the data, so the original\n",
    "# NumPy arrays are no longer needed; free them to reduce peak memory usage.\n",
    "# gc.collect() returns the number of unreachable objects it collected.\n",
    "del train, train_label, train_x, train_y, val_x, val_y\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create Model\n",
    "Define model architecture, you are encouraged to change and experiment with the model architecture."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Classifier(nn.Layer):\n",
    "    \"\"\"Frame-wise phoneme classifier: 429-dim features -> 39 class logits.\n",
    "\n",
    "    The forward pass returns raw logits: nn.CrossEntropyLoss applies softmax\n",
    "    internally, so squashing the outputs with Sigmoid first (as before)\n",
    "    distorts the loss and slows convergence.\n",
    "    \"\"\"\n",
    "    # TODO: How to modify this model to achieve better performance?\n",
    "    def __init__(self):\n",
    "        super(Classifier, self).__init__()\n",
    "        self.layer1 = nn.Linear(429, 39)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # No output activation: CrossEntropyLoss expects unnormalized logits.\n",
    "        return self.layer1(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Training\n",
    "Fix random seeds for reproducibility."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# fix random seed\n",
    "def same_seeds(seed):\n",
    "    \"\"\"Seed Paddle's and NumPy's global RNGs so runs are reproducible.\"\"\"\n",
    "    paddle.seed(seed)\n",
    "    np.random.seed(seed)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Feel free to change the training parameters here."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# fix random seed for reproducibility\n",
    "same_seeds(0)\n",
    "\n",
    "# TODO: How to tune these hyper-parameters to improve your model's performance?\n",
    "\n",
    "# training parameters\n",
    "epochs = 3             # number of training epoch\n",
    "learning_rate = 0.001       # learning rate\n",
    "\n",
    "# the path where checkpoint saved\n",
    "model_path = 'work/model'\n",
    "\n",
    "# create model, define a loss function, and optimizer\n",
    "model = Classifier()\n",
    "criterion = nn.CrossEntropyLoss()  # expects integer class labels; applies softmax internally\n",
    "optimizer = paddle.optimizer.Adam(parameters=model.parameters(), learning_rate=learning_rate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'fluid' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[12], line 14\u001b[0m\n\u001b[0;32m     12\u001b[0m val_num \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0.0\u001b[39m\n\u001b[0;32m     13\u001b[0m val_loss \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0.0\u001b[39m\n\u001b[1;32m---> 14\u001b[0m accuracy_manager \u001b[38;5;241m=\u001b[39m \u001b[43mfluid\u001b[49m\u001b[38;5;241m.\u001b[39mmetrics\u001b[38;5;241m.\u001b[39mAccuracy()\n\u001b[0;32m     15\u001b[0m val_accuracy_manager \u001b[38;5;241m=\u001b[39m fluid\u001b[38;5;241m.\u001b[39mmetrics\u001b[38;5;241m.\u001b[39mAccuracy()\n\u001b[0;32m     16\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m batch_id, data \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(train_loader()):\n",
      "\u001b[1;31mNameError\u001b[0m: name 'fluid' is not defined"
     ]
    }
   ],
   "source": [
    "\n",
    "# start training\n",
    "\n",
    "best_acc = 0.0\n",
    "loss_record = {'train': [], 'val': []}      # for recording training loss\n",
    "\n",
    "for epoch in range(epochs):\n",
    "    # ---------------- training ----------------\n",
    "    model.train()\n",
    "    train_num = 0.0    # samples seen this epoch\n",
    "    train_loss = 0.0   # summed batch losses\n",
    "    train_hit = 0.0    # batch accuracy weighted by batch size (= #correct)\n",
    "\n",
    "    val_num = 0.0\n",
    "    val_loss = 0.0\n",
    "    val_hit = 0.0\n",
    "    for batch_id, data in enumerate(train_loader()):\n",
    "        x_data = data[0]\n",
    "        y_data = data[1]\n",
    "\n",
    "        # ===================forward=====================\n",
    "        predicts = model(x_data)\n",
    "        loss = criterion(predicts, y_data)\n",
    "\n",
    "        # ==================calculate acc================\n",
    "        # paddle.metric.accuracy returns this batch's mean accuracy; weight it\n",
    "        # by the batch size so the (smaller) final batch is counted correctly.\n",
    "        # (The previous fluid.metrics.Accuracy raised NameError: fluid is not imported.)\n",
    "        acc = paddle.metric.accuracy(predicts, y_data)\n",
    "        train_hit += float(acc.numpy()) * len(y_data)\n",
    "\n",
    "        if batch_id % 1000 == 0:\n",
    "            loss_record['train'].append(loss.numpy())\n",
    "            print(\"epoch: {}, batch_id: {}, loss is: {}, acc is: {}\".format(epoch, batch_id, loss.numpy(), acc.numpy()))\n",
    "\n",
    "        # ===================backward====================\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        optimizer.clear_grad()\n",
    "\n",
    "        train_loss += loss\n",
    "        train_num += len(y_data)\n",
    "\n",
    "    train_acc = train_hit / train_num\n",
    "    total_train_loss = (train_loss / train_num) * BATCH_SIZE\n",
    "    print(\"epoch: {}, train loss is: {}, train acc is: {}\".format(epoch, total_train_loss.numpy(), train_acc))\n",
    "\n",
    "    # ---------------- validation ----------------\n",
    "    model.eval()\n",
    "    with paddle.no_grad():  # no gradients needed during evaluation\n",
    "        for batch_id, data in enumerate(val_loader()):\n",
    "            x_data = data[0]\n",
    "            y_data = data[1]\n",
    "\n",
    "            # ===================forward=====================\n",
    "            predicts = model(x_data)\n",
    "            loss = criterion(predicts, y_data)\n",
    "\n",
    "            # ==================calculate acc================\n",
    "            acc = paddle.metric.accuracy(predicts, y_data)\n",
    "            val_hit += float(acc.numpy()) * len(y_data)\n",
    "\n",
    "            val_loss += loss\n",
    "            val_num += len(y_data)\n",
    "\n",
    "    val_acc = val_hit / val_num\n",
    "    total_val_loss = (val_loss / val_num) * BATCH_SIZE\n",
    "    loss_record['val'].append(total_val_loss.numpy())\n",
    "    print(\"epoch: {}, val loss is: {}, val acc is: {}\".format(epoch, total_val_loss.numpy(), val_acc))\n",
    "    # ===================save====================\n",
    "    # Keep the checkpoint with the best validation accuracy seen so far.\n",
    "    if val_acc > best_acc:\n",
    "        best_acc = val_acc\n",
    "        paddle.save(model.state_dict(), 'work/model/best_model.pdparams')\n",
    "        paddle.save(optimizer.state_dict(), \"work/model/best_optimizer.pdopt\")\n",
    "\n",
    "paddle.save(model.state_dict(), 'work/model/final_model.pdparams')\n",
    "paddle.save(optimizer.state_dict(), \"work/model/final_optimizer.pdopt\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Evaluate on the public test set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Evaluate the best checkpoint on the public test set.\n",
    "test_set = TIMITDataset(test, test_label)\n",
    "test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False)\n",
    "\n",
    "test_num = 0.0\n",
    "test_loss = 0.0\n",
    "test_hit = 0.0   # batch accuracy weighted by batch size (= #correct)\n",
    "\n",
    "model_state_dict = paddle.load('work/model/best_model.pdparams')\n",
    "model.set_state_dict(model_state_dict)\n",
    "\n",
    "model.eval()\n",
    "with paddle.no_grad():  # no gradients needed during evaluation\n",
    "    for batch_id, data in enumerate(test_loader()):\n",
    "        x_data = data[0]\n",
    "        y_data = data[1]\n",
    "\n",
    "        # ===================forward=====================\n",
    "        predicts = model(x_data)\n",
    "        loss = criterion(predicts, y_data)\n",
    "\n",
    "        # ==================calculate acc================\n",
    "        # Weighted accumulation replaces fluid.metrics.Accuracy, which raised\n",
    "        # NameError because fluid is not imported in this notebook.\n",
    "        acc = paddle.metric.accuracy(predicts, y_data)\n",
    "        test_hit += float(acc.numpy()) * len(y_data)\n",
    "\n",
    "        test_loss += loss\n",
    "        test_num += len(y_data)\n",
    "\n",
    "test_acc = test_hit / test_num\n",
    "total_test_loss = (test_loss / test_num) * BATCH_SIZE\n",
    "print(\"test loss is: {}, test acc is: {}\".format(total_test_loss.numpy(), test_acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Get Private Test Set Prediction\n",
    "Remember to run this part to obtain private test set prediction, and submit the predict.csv file for (2pt) Private simple baseline and (4pt) Private strong baseline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
    "# Change to your own path\n",
    "# Extract the private test features, then list the target directory to confirm.\n",
    "!unzip -o data/data321226/private_test_x.zip -d work/data/\n",
    "!ls work/data/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "print('Loading data ...')\n",
    "\n",
    "data_root='work/data/'\n",
    "# Private test features extracted by the unzip cell above.\n",
    "private_test = np.load(data_root + 'private_test_x.npy')\n",
    "\n",
    "print('Size of private testing data: {}'.format(private_test.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "private_test_set = TIMITDataset(private_test)\n",
    "private_test_loader = DataLoader(private_test_set, batch_size=BATCH_SIZE, shuffle=False)\n",
    "\n",
    "# Restore the checkpoint that achieved the best validation accuracy.\n",
    "model_state_dict = paddle.load('work/model/best_model.pdparams')\n",
    "model.set_state_dict(model_state_dict)\n",
    "model.eval()\n",
    "predictions = []\n",
    "\n",
    "for batch_id, data in enumerate(private_test_loader()):\n",
    "    x_data = data[0]\n",
    "    # argmax over the class axis gives the predicted class id for each frame\n",
    "    logits  = model(x_data)\n",
    "    predictions.extend(paddle.argmax(logits, axis=1).cpu().numpy().tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Save predictions into the file, one predicted class id per line.\n",
    "with open(\"a1.csv\", \"w\") as f:\n",
    "    for pred in predictions:\n",
    "        f.write(f\"{pred}\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Plot loss curves"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def plot_learning_curve(loss_record, title=''):\n",
    "    ''' Plot learning curve of your DNN (train & val loss) '''\n",
    "    # Train losses are recorded many times per epoch, val losses once per\n",
    "    # epoch; x_2 subsamples the train-step axis at a matching stride so the\n",
    "    # validation points line up with the training curve.\n",
    "    total_steps = len(loss_record['train'])\n",
    "    x_1 = range(total_steps)\n",
    "    x_2 = x_1[len(loss_record['train']) // len(loss_record['val'])-1::len(loss_record['train']) // len(loss_record['val'])]\n",
    "    plt.figure(figsize=(6, 4))\n",
    "    plt.plot(x_1, loss_record['train'], c='tab:red', label='train')\n",
    "    plt.plot(x_2, loss_record['val'], c='tab:cyan', label='val')\n",
    "    plt.ylim(0.0, 5.)\n",
    "    plt.xlabel('Training steps')\n",
    "    plt.ylabel('CE loss')\n",
    "    plt.title('Learning curve of {}'.format(title))\n",
    "    plt.legend()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visualize the train/val loss curves recorded during training.\n",
    "plot_learning_curve(loss_record, title='deep model')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Running verify PaddlePaddle program ... \n",
      "PaddlePaddle works well on 1 CPU.\n",
      "PaddlePaddle works well on 2 CPUs.\n",
      "PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\ProgramData\\anaconda3\\envs\\paddle\\lib\\site-packages\\paddle\\fluid\\executor.py:1583: UserWarning: Standalone executor is not used for data parallel\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "The device should not be <re.Match object; span=(0, 5), match='gpu:0'>, since PaddlePaddle is not compiled with CUDA",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[2], line 4\u001b[0m\n\u001b[0;32m      2\u001b[0m  \u001b[38;5;66;03m# 开启0号GPU训练\u001b[39;00m\n\u001b[0;32m      3\u001b[0m use_gpu \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m----> 4\u001b[0m \u001b[43mpaddle\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdevice\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mset_device\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mgpu:0\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m use_gpu \u001b[38;5;28;01melse\u001b[39;00m paddle\u001b[38;5;241m.\u001b[39mdevice\u001b[38;5;241m.\u001b[39mset_device(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcpu\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "File \u001b[1;32mc:\\ProgramData\\anaconda3\\envs\\paddle\\lib\\site-packages\\paddle\\device\\__init__.py:316\u001b[0m, in \u001b[0;36mset_device\u001b[1;34m(device)\u001b[0m\n\u001b[0;32m    294\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mset_device\u001b[39m(device):\n\u001b[0;32m    295\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m    296\u001b[0m \u001b[38;5;124;03m    Paddle supports running calculations on various types of devices, including CPU, GPU, XPU, NPU, MLU and IPU.\u001b[39;00m\n\u001b[0;32m    297\u001b[0m \u001b[38;5;124;03m    They are represented by string identifiers. This function can specify the global device\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    314\u001b[0m \u001b[38;5;124;03m        data = paddle.stack([x1,x2], axis=1)\u001b[39;00m\n\u001b[0;32m    315\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 316\u001b[0m     place \u001b[38;5;241m=\u001b[39m \u001b[43m_convert_to_place\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    317\u001b[0m     framework\u001b[38;5;241m.\u001b[39m_set_expected_place(place)\n\u001b[0;32m    318\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m place\n",
      "File \u001b[1;32mc:\\ProgramData\\anaconda3\\envs\\paddle\\lib\\site-packages\\paddle\\device\\__init__.py:257\u001b[0m, in \u001b[0;36m_convert_to_place\u001b[1;34m(device)\u001b[0m\n\u001b[0;32m    255\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m avaliable_gpu_device:\n\u001b[0;32m    256\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m core\u001b[38;5;241m.\u001b[39mis_compiled_with_cuda():\n\u001b[1;32m--> 257\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    258\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe device should not be \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m, since PaddlePaddle is \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    259\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnot compiled with CUDA\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(avaliable_gpu_device))\n\u001b[0;32m    260\u001b[0m     device_info_list \u001b[38;5;241m=\u001b[39m device\u001b[38;5;241m.\u001b[39msplit(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m:\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m    261\u001b[0m     device_id \u001b[38;5;241m=\u001b[39m device_info_list[\u001b[38;5;241m1\u001b[39m]\n",
      "\u001b[1;31mValueError\u001b[0m: The device should not be <re.Match object; span=(0, 5), match='gpu:0'>, since PaddlePaddle is not compiled with CUDA"
     ]
    }
   ],
   "source": [
    "paddle.utils.run_check()\n",
    "# Use GPU 0 for training only when this Paddle build has CUDA support;\n",
    "# otherwise fall back to CPU instead of raising a ValueError (as the\n",
    "# unconditional 'gpu:0' request did on CPU-only installs).\n",
    "use_gpu = paddle.device.is_compiled_with_cuda()\n",
    "paddle.device.set_device('gpu:0' if use_gpu else 'cpu')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "paddle",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
