{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch \n",
    "import torchvision\n",
    "import torch.nn as nn\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_0_F.shape:  (700000, 15)\n",
      "train_1_F.shape:  (42000, 15)\n"
     ]
    }
   ],
   "source": [
    "train_0_F = np.loadtxt(open(\"../../data/motor_fault/train_0_F.csv\",\"rb\"), delimiter=\",\", skiprows=0)\n",
    "train_1_F = np.loadtxt(open(\"../../data/motor_fault/train_1_F.csv\",\"rb\"), delimiter=\",\", skiprows=0)\n",
    "print(\"train_0_F.shape: \", train_0_F.shape)\n",
    "print(\"train_1_F.shape: \", train_1_F.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Device configuration\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters\n",
    "input_size = 14\n",
    "hidden_size1 = 100\n",
    "hidden_size2 = 60\n",
    "hidden_size3 = 30\n",
    "num_classes = 2\n",
    "num_epochs = 50\n",
    "t_max = 10\n",
    "batch_size = 100\n",
    "learning_rate = 1e-2\n",
    "valid_size = 0.2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset\n",
    "train_F = np.concatenate((train_0_F, train_1_F), axis=0)\n",
    "x = torch.tensor(train_F[:,:-1], dtype=torch.float32)\n",
    "y = torch.tensor(train_F[:,-1], dtype=torch.long)\n",
    "dataset = torch.utils.data.TensorDataset(x, y)\n",
    "\n",
    "num_train = len(dataset)\n",
    "indices = list(range(num_train))\n",
    "split = int(np.floor(valid_size * num_train))\n",
    "np.random.shuffle(indices)\n",
    "train_idx, test_idx = indices[:split], indices[split:]\n",
    "trainset = torch.utils.data.Subset(dataset=dataset, indices=train_idx)\n",
    "testset = torch.utils.data.Subset(dataset=dataset, indices=test_idx)\n",
    "\n",
    "trainloader = torch.utils.data.DataLoader(trainset,\n",
    "                                     batch_size = batch_size,\n",
    "                                     shuffle = True,\n",
    "                                     num_workers = 2)\n",
    "testloader = torch.utils.data.DataLoader(testset,\n",
    "                                     batch_size = batch_size,\n",
    "                                     shuffle = False,\n",
    "                                     num_workers = 2)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fully connected neural network with one hidden layer\n",
    "class NeuralNet(nn.Module):\n",
    "    def __init__(self, input_size=input_size, hidden_size1=hidden_size1, \n",
    "                 hidden_size2=hidden_size2, hidden_size3=hidden_size3, num_classes=num_classes):\n",
    "        super(NeuralNet, self).__init__()\n",
    "        self.fc1 = nn.Linear(input_size, hidden_size1)\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        self.fc2 = nn.Linear(hidden_size1, hidden_size2)\n",
    "        self.fc3 = nn.Linear(hidden_size2, hidden_size3)\n",
    "        self.fc4 = nn.Linear(hidden_size3, num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.fc1(x)\n",
    "        out = self.relu(out)\n",
    "        out = self.fc2(out)\n",
    "        out = self.relu(out)\n",
    "        out = self.fc3(out)\n",
    "        out = self.relu(out)\n",
    "        out = self.fc4(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = NeuralNet().to(device)\n",
    "weight=torch.tensor([30,500]).to(device)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, nesterov=True)\n",
    "lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max = t_max)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(loader):\n",
    "    # test on all test data\n",
    "    correct = 0.0\n",
    "    total = 0.0\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for x, y in loader:\n",
    "            x = x.to(device)\n",
    "            y = y.to(device)\n",
    "            outputs = model(x)\n",
    "            predicted = torch.max(outputs.data, 1)[1]\n",
    "            total += y.size(0)\n",
    "            correct += (predicted == y).sum().item()\n",
    "\n",
    "    model.train()\n",
    "    #print('Test_acc: {:.2f}%'.format(100 * correct / total))\n",
    "    return 100 * correct / total"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1-3/(50+3) = 0.943"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_acc: 94.3228\n",
      "test_acc: 94.3438\n"
     ]
    }
   ],
   "source": [
    "print(\"train_acc: {:.4f}\".format(test(trainloader)))\n",
    "print(\"test_acc: {:.4f}\".format(test(testloader)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: [1/50], Step: [100/1484], Loss: 0.2187\n",
      "Epoch: [1/50], Step: [200/1484], Loss: 0.1852\n",
      "Epoch: [1/50], Step: [300/1484], Loss: 0.1556\n",
      "Epoch: [1/50], Step: [400/1484], Loss: 0.1246\n",
      "Epoch: [1/50], Step: [500/1484], Loss: 0.1162\n",
      "Epoch: [1/50], Step: [600/1484], Loss: 0.1119\n",
      "Epoch: [1/50], Step: [700/1484], Loss: 0.1085\n",
      "Epoch: [1/50], Step: [800/1484], Loss: 0.1062\n",
      "Epoch: [1/50], Step: [900/1484], Loss: 0.1097\n",
      "Epoch: [1/50], Step: [1000/1484], Loss: 0.0983\n",
      "Epoch: [1/50], Step: [1100/1484], Loss: 0.1052\n",
      "Epoch: [1/50], Step: [1200/1484], Loss: 0.1028\n",
      "Epoch: [1/50], Step: [1300/1484], Loss: 0.1026\n",
      "Epoch: [1/50], Step: [1400/1484], Loss: 0.1023\n",
      "train_acc: 96.7682\n",
      "test_acc: 96.7603\n",
      "Epoch: [2/50], Step: [100/1484], Loss: 0.1015\n",
      "Epoch: [2/50], Step: [200/1484], Loss: 0.0961\n",
      "Epoch: [2/50], Step: [300/1484], Loss: 0.1024\n",
      "Epoch: [2/50], Step: [400/1484], Loss: 0.0992\n",
      "Epoch: [2/50], Step: [500/1484], Loss: 0.1051\n",
      "Epoch: [2/50], Step: [600/1484], Loss: 0.0982\n",
      "Epoch: [2/50], Step: [700/1484], Loss: 0.0941\n",
      "Epoch: [2/50], Step: [800/1484], Loss: 0.1023\n",
      "Epoch: [2/50], Step: [900/1484], Loss: 0.1013\n",
      "Epoch: [2/50], Step: [1000/1484], Loss: 0.0913\n",
      "Epoch: [2/50], Step: [1100/1484], Loss: 0.1006\n",
      "Epoch: [2/50], Step: [1200/1484], Loss: 0.0957\n",
      "Epoch: [2/50], Step: [1300/1484], Loss: 0.1009\n",
      "Epoch: [2/50], Step: [1400/1484], Loss: 0.0981\n",
      "train_acc: 97.0027\n",
      "test_acc: 96.9943\n",
      "Epoch: [3/50], Step: [100/1484], Loss: 0.0945\n",
      "Epoch: [3/50], Step: [200/1484], Loss: 0.0961\n",
      "Epoch: [3/50], Step: [300/1484], Loss: 0.0984\n",
      "Epoch: [3/50], Step: [400/1484], Loss: 0.1027\n",
      "Epoch: [3/50], Step: [500/1484], Loss: 0.0892\n",
      "Epoch: [3/50], Step: [600/1484], Loss: 0.0959\n",
      "Epoch: [3/50], Step: [700/1484], Loss: 0.0981\n",
      "Epoch: [3/50], Step: [800/1484], Loss: 0.0888\n",
      "Epoch: [3/50], Step: [900/1484], Loss: 0.0919\n",
      "Epoch: [3/50], Step: [1000/1484], Loss: 0.1013\n",
      "Epoch: [3/50], Step: [1100/1484], Loss: 0.1007\n",
      "Epoch: [3/50], Step: [1200/1484], Loss: 0.0916\n",
      "Epoch: [3/50], Step: [1300/1484], Loss: 0.0957\n",
      "Epoch: [3/50], Step: [1400/1484], Loss: 0.0929\n",
      "train_acc: 97.0627\n",
      "test_acc: 97.0431\n",
      "Epoch: [4/50], Step: [100/1484], Loss: 0.0931\n",
      "Epoch: [4/50], Step: [200/1484], Loss: 0.0898\n",
      "Epoch: [4/50], Step: [300/1484], Loss: 0.0875\n",
      "Epoch: [4/50], Step: [400/1484], Loss: 0.0937\n",
      "Epoch: [4/50], Step: [500/1484], Loss: 0.0944\n",
      "Epoch: [4/50], Step: [600/1484], Loss: 0.0920\n",
      "Epoch: [4/50], Step: [700/1484], Loss: 0.0952\n",
      "Epoch: [4/50], Step: [800/1484], Loss: 0.0976\n",
      "Epoch: [4/50], Step: [900/1484], Loss: 0.0952\n",
      "Epoch: [4/50], Step: [1000/1484], Loss: 0.0910\n",
      "Epoch: [4/50], Step: [1100/1484], Loss: 0.0876\n",
      "Epoch: [4/50], Step: [1200/1484], Loss: 0.0881\n",
      "Epoch: [4/50], Step: [1300/1484], Loss: 0.0910\n",
      "Epoch: [4/50], Step: [1400/1484], Loss: 0.0951\n",
      "train_acc: 97.2958\n",
      "test_acc: 97.2933\n",
      "Epoch: [5/50], Step: [100/1484], Loss: 0.0796\n",
      "Epoch: [5/50], Step: [200/1484], Loss: 0.0972\n",
      "Epoch: [5/50], Step: [300/1484], Loss: 0.0920\n",
      "Epoch: [5/50], Step: [400/1484], Loss: 0.0928\n",
      "Epoch: [5/50], Step: [500/1484], Loss: 0.0979\n",
      "Epoch: [5/50], Step: [600/1484], Loss: 0.1526\n",
      "Epoch: [5/50], Step: [700/1484], Loss: 0.1078\n",
      "Epoch: [5/50], Step: [800/1484], Loss: 0.0990\n",
      "Epoch: [5/50], Step: [900/1484], Loss: 0.0882\n",
      "Epoch: [5/50], Step: [1000/1484], Loss: 0.1040\n",
      "Epoch: [5/50], Step: [1100/1484], Loss: 0.0811\n",
      "Epoch: [5/50], Step: [1200/1484], Loss: 0.0854\n",
      "Epoch: [5/50], Step: [1300/1484], Loss: 0.0920\n",
      "Epoch: [5/50], Step: [1400/1484], Loss: 0.0942\n",
      "train_acc: 97.3363\n",
      "test_acc: 97.3447\n",
      "Epoch: [6/50], Step: [100/1484], Loss: 0.0921\n",
      "Epoch: [6/50], Step: [200/1484], Loss: 0.0893\n",
      "Epoch: [6/50], Step: [300/1484], Loss: 0.0919\n",
      "Epoch: [6/50], Step: [400/1484], Loss: 0.0823\n",
      "Epoch: [6/50], Step: [500/1484], Loss: 0.0967\n",
      "Epoch: [6/50], Step: [600/1484], Loss: 0.0931\n",
      "Epoch: [6/50], Step: [700/1484], Loss: 0.0888\n",
      "Epoch: [6/50], Step: [800/1484], Loss: 0.0928\n",
      "Epoch: [6/50], Step: [900/1484], Loss: 0.0841\n",
      "Epoch: [6/50], Step: [1000/1484], Loss: 0.0954\n",
      "Epoch: [6/50], Step: [1100/1484], Loss: 0.0847\n",
      "Epoch: [6/50], Step: [1200/1484], Loss: 0.0885\n",
      "Epoch: [6/50], Step: [1300/1484], Loss: 0.0854\n",
      "Epoch: [6/50], Step: [1400/1484], Loss: 0.0899\n",
      "train_acc: 97.3693\n",
      "test_acc: 97.3880\n",
      "Epoch: [7/50], Step: [100/1484], Loss: 0.0850\n",
      "Epoch: [7/50], Step: [200/1484], Loss: 0.0827\n",
      "Epoch: [7/50], Step: [300/1484], Loss: 0.0821\n",
      "Epoch: [7/50], Step: [400/1484], Loss: 0.0862\n",
      "Epoch: [7/50], Step: [500/1484], Loss: 0.0859\n",
      "Epoch: [7/50], Step: [600/1484], Loss: 0.0937\n",
      "Epoch: [7/50], Step: [700/1484], Loss: 0.0908\n",
      "Epoch: [7/50], Step: [800/1484], Loss: 0.0843\n",
      "Epoch: [7/50], Step: [900/1484], Loss: 0.0946\n",
      "Epoch: [7/50], Step: [1000/1484], Loss: 0.0889\n",
      "Epoch: [7/50], Step: [1100/1484], Loss: 0.0838\n",
      "Epoch: [7/50], Step: [1200/1484], Loss: 0.0804\n",
      "Epoch: [7/50], Step: [1300/1484], Loss: 0.0968\n",
      "Epoch: [7/50], Step: [1400/1484], Loss: 0.4780\n",
      "train_acc: 96.7850\n",
      "test_acc: 96.7562\n",
      "Epoch: [8/50], Step: [100/1484], Loss: 0.0984\n",
      "Epoch: [8/50], Step: [200/1484], Loss: 0.1106\n",
      "Epoch: [8/50], Step: [300/1484], Loss: 0.1064\n",
      "Epoch: [8/50], Step: [400/1484], Loss: 0.0960\n",
      "Epoch: [8/50], Step: [500/1484], Loss: 0.0929\n",
      "Epoch: [8/50], Step: [600/1484], Loss: 0.1354\n",
      "Epoch: [8/50], Step: [700/1484], Loss: 0.0972\n",
      "Epoch: [8/50], Step: [800/1484], Loss: 0.0972\n",
      "Epoch: [8/50], Step: [900/1484], Loss: 0.5118\n",
      "Epoch: [8/50], Step: [1000/1484], Loss: 0.3450\n",
      "Epoch: [8/50], Step: [1100/1484], Loss: 0.1036\n",
      "Epoch: [8/50], Step: [1200/1484], Loss: 0.1001\n",
      "Epoch: [8/50], Step: [1300/1484], Loss: 0.1015\n",
      "Epoch: [8/50], Step: [1400/1484], Loss: 0.1082\n",
      "train_acc: 97.2150\n",
      "test_acc: 97.2284\n",
      "Epoch: [9/50], Step: [100/1484], Loss: 0.1005\n",
      "Epoch: [9/50], Step: [200/1484], Loss: 0.0998\n",
      "Epoch: [9/50], Step: [300/1484], Loss: 0.0882\n",
      "Epoch: [9/50], Step: [400/1484], Loss: 0.0952\n",
      "Epoch: [9/50], Step: [500/1484], Loss: 0.0953\n",
      "Epoch: [9/50], Step: [600/1484], Loss: 0.0929\n",
      "Epoch: [9/50], Step: [700/1484], Loss: 0.0922\n",
      "Epoch: [9/50], Step: [800/1484], Loss: 0.1016\n",
      "Epoch: [9/50], Step: [900/1484], Loss: 0.0903\n",
      "Epoch: [9/50], Step: [1000/1484], Loss: 0.0972\n",
      "Epoch: [9/50], Step: [1100/1484], Loss: 0.0992\n",
      "Epoch: [9/50], Step: [1200/1484], Loss: 0.0940\n",
      "Epoch: [9/50], Step: [1300/1484], Loss: 0.0886\n",
      "Epoch: [9/50], Step: [1400/1484], Loss: 0.0956\n",
      "train_acc: 97.3336\n",
      "test_acc: 97.3423\n",
      "Epoch: [10/50], Step: [100/1484], Loss: 0.0863\n",
      "Epoch: [10/50], Step: [200/1484], Loss: 0.0871\n",
      "Epoch: [10/50], Step: [300/1484], Loss: 0.0877\n",
      "Epoch: [10/50], Step: [400/1484], Loss: 0.0922\n",
      "Epoch: [10/50], Step: [500/1484], Loss: 0.0922\n",
      "Epoch: [10/50], Step: [600/1484], Loss: 0.1031\n",
      "Epoch: [10/50], Step: [700/1484], Loss: 0.0963\n",
      "Epoch: [10/50], Step: [800/1484], Loss: 0.6164\n",
      "Epoch: [10/50], Step: [900/1484], Loss: 0.2242\n",
      "Epoch: [10/50], Step: [1000/1484], Loss: 0.1222\n",
      "Epoch: [10/50], Step: [1100/1484], Loss: 0.1418\n",
      "Epoch: [10/50], Step: [1200/1484], Loss: 0.1206\n",
      "Epoch: [10/50], Step: [1300/1484], Loss: 0.1228\n",
      "Epoch: [10/50], Step: [1400/1484], Loss: 0.0970\n",
      "train_acc: 97.1759\n",
      "test_acc: 97.2011\n",
      "Epoch: [11/50], Step: [100/1484], Loss: 0.0979\n",
      "Epoch: [11/50], Step: [200/1484], Loss: 0.1050\n",
      "Epoch: [11/50], Step: [300/1484], Loss: 0.0971\n",
      "Epoch: [11/50], Step: [400/1484], Loss: 0.0969\n",
      "Epoch: [11/50], Step: [500/1484], Loss: 0.0944\n",
      "Epoch: [11/50], Step: [600/1484], Loss: 0.0883\n",
      "Epoch: [11/50], Step: [700/1484], Loss: 0.0968\n",
      "Epoch: [11/50], Step: [800/1484], Loss: 0.0836\n",
      "Epoch: [11/50], Step: [900/1484], Loss: 0.1012\n",
      "Epoch: [11/50], Step: [1000/1484], Loss: 0.0905\n",
      "Epoch: [11/50], Step: [1100/1484], Loss: 0.0932\n",
      "Epoch: [11/50], Step: [1200/1484], Loss: 0.0890\n",
      "Epoch: [11/50], Step: [1300/1484], Loss: 0.0920\n",
      "Epoch: [11/50], Step: [1400/1484], Loss: 0.1014\n",
      "train_acc: 97.2244\n",
      "test_acc: 97.2234\n",
      "Epoch: [12/50], Step: [100/1484], Loss: 0.0982\n",
      "Epoch: [12/50], Step: [200/1484], Loss: 0.0941\n",
      "Epoch: [12/50], Step: [300/1484], Loss: 0.0954\n",
      "Epoch: [12/50], Step: [400/1484], Loss: 0.0885\n",
      "Epoch: [12/50], Step: [500/1484], Loss: 0.0851\n",
      "Epoch: [12/50], Step: [600/1484], Loss: 0.0845\n",
      "Epoch: [12/50], Step: [700/1484], Loss: 0.0824\n",
      "Epoch: [12/50], Step: [800/1484], Loss: 0.0801\n",
      "Epoch: [12/50], Step: [900/1484], Loss: 0.0916\n",
      "Epoch: [12/50], Step: [1000/1484], Loss: 0.0875\n",
      "Epoch: [12/50], Step: [1100/1484], Loss: 0.0956\n",
      "Epoch: [12/50], Step: [1200/1484], Loss: 0.0869\n",
      "Epoch: [12/50], Step: [1300/1484], Loss: 0.0936\n",
      "Epoch: [12/50], Step: [1400/1484], Loss: 0.0822\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_acc: 97.3720\n",
      "test_acc: 97.3994\n",
      "Epoch: [13/50], Step: [100/1484], Loss: 0.0863\n",
      "Epoch: [13/50], Step: [200/1484], Loss: 0.0884\n",
      "Epoch: [13/50], Step: [300/1484], Loss: 0.4178\n",
      "Epoch: [13/50], Step: [400/1484], Loss: 0.0948\n",
      "Epoch: [13/50], Step: [500/1484], Loss: 0.0887\n",
      "Epoch: [13/50], Step: [600/1484], Loss: 0.0942\n",
      "Epoch: [13/50], Step: [700/1484], Loss: 0.0917\n",
      "Epoch: [13/50], Step: [800/1484], Loss: 0.0929\n",
      "Epoch: [13/50], Step: [900/1484], Loss: 0.0931\n",
      "Epoch: [13/50], Step: [1000/1484], Loss: 0.0952\n",
      "Epoch: [13/50], Step: [1100/1484], Loss: 0.0951\n",
      "Epoch: [13/50], Step: [1200/1484], Loss: 0.0907\n",
      "Epoch: [13/50], Step: [1300/1484], Loss: 0.0910\n",
      "Epoch: [13/50], Step: [1400/1484], Loss: 0.0827\n",
      "train_acc: 97.3100\n",
      "test_acc: 97.3571\n",
      "Epoch: [14/50], Step: [100/1484], Loss: 0.0900\n",
      "Epoch: [14/50], Step: [200/1484], Loss: 0.0902\n",
      "Epoch: [14/50], Step: [300/1484], Loss: 0.0896\n",
      "Epoch: [14/50], Step: [400/1484], Loss: 0.0843\n",
      "Epoch: [14/50], Step: [500/1484], Loss: 0.0909\n",
      "Epoch: [14/50], Step: [600/1484], Loss: 0.0872\n",
      "Epoch: [14/50], Step: [700/1484], Loss: 0.0836\n",
      "Epoch: [14/50], Step: [800/1484], Loss: 0.0923\n",
      "Epoch: [14/50], Step: [900/1484], Loss: 0.0847\n",
      "Epoch: [14/50], Step: [1000/1484], Loss: 0.0873\n",
      "Epoch: [14/50], Step: [1100/1484], Loss: 0.0918\n",
      "Epoch: [14/50], Step: [1200/1484], Loss: 0.0986\n",
      "Epoch: [14/50], Step: [1300/1484], Loss: 0.0932\n",
      "Epoch: [14/50], Step: [1400/1484], Loss: 0.0818\n",
      "train_acc: 97.1691\n",
      "test_acc: 97.1771\n",
      "Epoch: [15/50], Step: [100/1484], Loss: 0.0946\n",
      "Epoch: [15/50], Step: [200/1484], Loss: 0.0813\n",
      "Epoch: [15/50], Step: [300/1484], Loss: 0.0823\n",
      "Epoch: [15/50], Step: [400/1484], Loss: 0.0776\n",
      "Epoch: [15/50], Step: [500/1484], Loss: 0.0799\n",
      "Epoch: [15/50], Step: [600/1484], Loss: 0.0796\n",
      "Epoch: [15/50], Step: [700/1484], Loss: 0.0883\n",
      "Epoch: [15/50], Step: [800/1484], Loss: 0.0831\n",
      "Epoch: [15/50], Step: [900/1484], Loss: 0.0909\n",
      "Epoch: [15/50], Step: [1000/1484], Loss: 0.0836\n",
      "Epoch: [15/50], Step: [1100/1484], Loss: 0.0899\n",
      "Epoch: [15/50], Step: [1200/1484], Loss: 0.0901\n",
      "Epoch: [15/50], Step: [1300/1484], Loss: 0.0849\n",
      "Epoch: [15/50], Step: [1400/1484], Loss: 0.0911\n",
      "train_acc: 97.2709\n",
      "test_acc: 97.2604\n",
      "Epoch: [16/50], Step: [100/1484], Loss: 0.0875\n",
      "Epoch: [16/50], Step: [200/1484], Loss: 0.0849\n",
      "Epoch: [16/50], Step: [300/1484], Loss: 0.0788\n",
      "Epoch: [16/50], Step: [400/1484], Loss: 0.0928\n",
      "Epoch: [16/50], Step: [500/1484], Loss: 0.0871\n",
      "Epoch: [16/50], Step: [600/1484], Loss: 0.0834\n",
      "Epoch: [16/50], Step: [700/1484], Loss: 0.0863\n",
      "Epoch: [16/50], Step: [800/1484], Loss: 0.0858\n",
      "Epoch: [16/50], Step: [900/1484], Loss: 0.0831\n",
      "Epoch: [16/50], Step: [1000/1484], Loss: 0.0810\n",
      "Epoch: [16/50], Step: [1100/1484], Loss: 0.0905\n",
      "Epoch: [16/50], Step: [1200/1484], Loss: 0.0845\n",
      "Epoch: [16/50], Step: [1300/1484], Loss: 0.0835\n",
      "Epoch: [16/50], Step: [1400/1484], Loss: 0.0791\n",
      "train_acc: 97.6489\n",
      "test_acc: 97.6873\n",
      "Epoch: [17/50], Step: [100/1484], Loss: 0.0805\n",
      "Epoch: [17/50], Step: [200/1484], Loss: 0.0904\n",
      "Epoch: [17/50], Step: [300/1484], Loss: 0.0841\n",
      "Epoch: [17/50], Step: [400/1484], Loss: 0.0873\n",
      "Epoch: [17/50], Step: [500/1484], Loss: 0.0812\n",
      "Epoch: [17/50], Step: [600/1484], Loss: 0.0787\n",
      "Epoch: [17/50], Step: [700/1484], Loss: 0.0779\n",
      "Epoch: [17/50], Step: [800/1484], Loss: 0.0906\n",
      "Epoch: [17/50], Step: [900/1484], Loss: 0.0831\n",
      "Epoch: [17/50], Step: [1000/1484], Loss: 0.0846\n",
      "Epoch: [17/50], Step: [1100/1484], Loss: 0.0861\n",
      "Epoch: [17/50], Step: [1200/1484], Loss: 0.0807\n",
      "Epoch: [17/50], Step: [1300/1484], Loss: 0.0806\n",
      "Epoch: [17/50], Step: [1400/1484], Loss: 0.0891\n",
      "train_acc: 97.2695\n",
      "test_acc: 97.2542\n",
      "Epoch: [18/50], Step: [100/1484], Loss: 0.0798\n",
      "Epoch: [18/50], Step: [200/1484], Loss: 0.0893\n",
      "Epoch: [18/50], Step: [300/1484], Loss: 0.0909\n",
      "Epoch: [18/50], Step: [400/1484], Loss: 0.0844\n",
      "Epoch: [18/50], Step: [500/1484], Loss: 0.0848\n",
      "Epoch: [18/50], Step: [600/1484], Loss: 0.0814\n",
      "Epoch: [18/50], Step: [700/1484], Loss: 0.0866\n",
      "Epoch: [18/50], Step: [800/1484], Loss: 0.0817\n",
      "Epoch: [18/50], Step: [900/1484], Loss: 0.0752\n",
      "Epoch: [18/50], Step: [1000/1484], Loss: 0.0940\n",
      "Epoch: [18/50], Step: [1100/1484], Loss: 0.0890\n",
      "Epoch: [18/50], Step: [1200/1484], Loss: 0.0816\n",
      "Epoch: [18/50], Step: [1300/1484], Loss: 0.0834\n",
      "Epoch: [18/50], Step: [1400/1484], Loss: 0.0821\n",
      "train_acc: 97.4420\n",
      "test_acc: 97.4404\n",
      "Epoch: [19/50], Step: [100/1484], Loss: 0.0825\n",
      "Epoch: [19/50], Step: [200/1484], Loss: 0.0863\n",
      "Epoch: [19/50], Step: [300/1484], Loss: 0.0818\n",
      "Epoch: [19/50], Step: [400/1484], Loss: 0.0833\n",
      "Epoch: [19/50], Step: [500/1484], Loss: 0.0883\n",
      "Epoch: [19/50], Step: [600/1484], Loss: 0.0833\n",
      "Epoch: [19/50], Step: [700/1484], Loss: 0.0804\n",
      "Epoch: [19/50], Step: [800/1484], Loss: 0.0808\n",
      "Epoch: [19/50], Step: [900/1484], Loss: 0.0870\n",
      "Epoch: [19/50], Step: [1000/1484], Loss: 0.0812\n",
      "Epoch: [19/50], Step: [1100/1484], Loss: 0.0791\n",
      "Epoch: [19/50], Step: [1200/1484], Loss: 0.0825\n",
      "Epoch: [19/50], Step: [1300/1484], Loss: 0.0944\n",
      "Epoch: [19/50], Step: [1400/1484], Loss: 0.1093\n",
      "train_acc: 97.3403\n",
      "test_acc: 97.3501\n",
      "Epoch: [20/50], Step: [100/1484], Loss: 0.0860\n",
      "Epoch: [20/50], Step: [200/1484], Loss: 0.0849\n",
      "Epoch: [20/50], Step: [300/1484], Loss: 0.0882\n",
      "Epoch: [20/50], Step: [400/1484], Loss: 0.0897\n",
      "Epoch: [20/50], Step: [500/1484], Loss: 0.0861\n",
      "Epoch: [20/50], Step: [600/1484], Loss: 0.0807\n",
      "Epoch: [20/50], Step: [700/1484], Loss: 0.0807\n",
      "Epoch: [20/50], Step: [800/1484], Loss: 0.0864\n",
      "Epoch: [20/50], Step: [900/1484], Loss: 0.0832\n",
      "Epoch: [20/50], Step: [1000/1484], Loss: 0.0832\n",
      "Epoch: [20/50], Step: [1100/1484], Loss: 0.0712\n",
      "Epoch: [20/50], Step: [1200/1484], Loss: 0.0838\n",
      "Epoch: [20/50], Step: [1300/1484], Loss: 0.0815\n",
      "Epoch: [20/50], Step: [1400/1484], Loss: 0.0901\n",
      "train_acc: 97.5472\n",
      "test_acc: 97.5785\n",
      "Epoch: [21/50], Step: [100/1484], Loss: 0.0846\n",
      "Epoch: [21/50], Step: [200/1484], Loss: 0.0799\n",
      "Epoch: [21/50], Step: [300/1484], Loss: 0.0825\n",
      "Epoch: [21/50], Step: [400/1484], Loss: 0.0763\n",
      "Epoch: [21/50], Step: [500/1484], Loss: 0.0830\n",
      "Epoch: [21/50], Step: [600/1484], Loss: 0.0776\n",
      "Epoch: [21/50], Step: [700/1484], Loss: 0.0789\n",
      "Epoch: [21/50], Step: [800/1484], Loss: 0.0749\n",
      "Epoch: [21/50], Step: [900/1484], Loss: 0.0797\n",
      "Epoch: [21/50], Step: [1000/1484], Loss: 0.0850\n",
      "Epoch: [21/50], Step: [1100/1484], Loss: 0.0810\n",
      "Epoch: [21/50], Step: [1200/1484], Loss: 0.0873\n",
      "Epoch: [21/50], Step: [1300/1484], Loss: 0.0797\n",
      "Epoch: [21/50], Step: [1400/1484], Loss: 0.0836\n",
      "train_acc: 97.3632\n",
      "test_acc: 97.3423\n",
      "Epoch: [22/50], Step: [100/1484], Loss: 0.0886\n",
      "Epoch: [22/50], Step: [200/1484], Loss: 0.0814\n",
      "Epoch: [22/50], Step: [300/1484], Loss: 0.0792\n",
      "Epoch: [22/50], Step: [400/1484], Loss: 0.0829\n",
      "Epoch: [22/50], Step: [500/1484], Loss: 0.0820\n",
      "Epoch: [22/50], Step: [600/1484], Loss: 0.0821\n",
      "Epoch: [22/50], Step: [700/1484], Loss: 0.0803\n",
      "Epoch: [22/50], Step: [800/1484], Loss: 0.0876\n",
      "Epoch: [22/50], Step: [900/1484], Loss: 0.0788\n",
      "Epoch: [22/50], Step: [1000/1484], Loss: 0.0814\n",
      "Epoch: [22/50], Step: [1100/1484], Loss: 0.0772\n",
      "Epoch: [22/50], Step: [1200/1484], Loss: 0.0812\n",
      "Epoch: [22/50], Step: [1300/1484], Loss: 0.0771\n",
      "Epoch: [22/50], Step: [1400/1484], Loss: 0.0760\n",
      "train_acc: 97.5505\n",
      "test_acc: 97.6130\n",
      "Epoch: [23/50], Step: [100/1484], Loss: 0.0763\n",
      "Epoch: [23/50], Step: [200/1484], Loss: 0.0803\n",
      "Epoch: [23/50], Step: [300/1484], Loss: 0.0727\n",
      "Epoch: [23/50], Step: [400/1484], Loss: 0.0821\n",
      "Epoch: [23/50], Step: [500/1484], Loss: 0.0800\n",
      "Epoch: [23/50], Step: [600/1484], Loss: 0.0791\n",
      "Epoch: [23/50], Step: [700/1484], Loss: 0.0761\n",
      "Epoch: [23/50], Step: [800/1484], Loss: 0.0720\n",
      "Epoch: [23/50], Step: [900/1484], Loss: 0.0824\n",
      "Epoch: [23/50], Step: [1000/1484], Loss: 0.0777\n",
      "Epoch: [23/50], Step: [1100/1484], Loss: 0.0803\n",
      "Epoch: [23/50], Step: [1200/1484], Loss: 0.0790\n",
      "Epoch: [23/50], Step: [1300/1484], Loss: 0.0824\n",
      "Epoch: [23/50], Step: [1400/1484], Loss: 0.0830\n",
      "train_acc: 97.3565\n",
      "test_acc: 97.3566\n",
      "Epoch: [24/50], Step: [100/1484], Loss: 0.0747\n",
      "Epoch: [24/50], Step: [200/1484], Loss: 0.0857\n",
      "Epoch: [24/50], Step: [300/1484], Loss: 0.0714\n",
      "Epoch: [24/50], Step: [400/1484], Loss: 0.0806\n",
      "Epoch: [24/50], Step: [500/1484], Loss: 0.0836\n",
      "Epoch: [24/50], Step: [600/1484], Loss: 0.0827\n",
      "Epoch: [24/50], Step: [700/1484], Loss: 0.0815\n",
      "Epoch: [24/50], Step: [800/1484], Loss: 0.0845\n",
      "Epoch: [24/50], Step: [900/1484], Loss: 0.0787\n",
      "Epoch: [24/50], Step: [1000/1484], Loss: 0.0789\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: [24/50], Step: [1100/1484], Loss: 0.0769\n",
      "Epoch: [24/50], Step: [1200/1484], Loss: 0.0812\n",
      "Epoch: [24/50], Step: [1300/1484], Loss: 0.0800\n",
      "Epoch: [24/50], Step: [1400/1484], Loss: 0.0763\n",
      "train_acc: 97.4353\n",
      "test_acc: 97.4943\n",
      "Epoch: [25/50], Step: [100/1484], Loss: 0.0778\n",
      "Epoch: [25/50], Step: [200/1484], Loss: 0.0804\n",
      "Epoch: [25/50], Step: [300/1484], Loss: 0.0758\n",
      "Epoch: [25/50], Step: [400/1484], Loss: 0.0768\n",
      "Epoch: [25/50], Step: [500/1484], Loss: 0.0845\n",
      "Epoch: [25/50], Step: [600/1484], Loss: 0.0769\n",
      "Epoch: [25/50], Step: [700/1484], Loss: 0.0740\n",
      "Epoch: [25/50], Step: [800/1484], Loss: 0.0806\n",
      "Epoch: [25/50], Step: [900/1484], Loss: 0.0815\n",
      "Epoch: [25/50], Step: [1000/1484], Loss: 0.0805\n",
      "Epoch: [25/50], Step: [1100/1484], Loss: 0.0787\n",
      "Epoch: [25/50], Step: [1200/1484], Loss: 0.0757\n",
      "Epoch: [25/50], Step: [1300/1484], Loss: 0.0784\n",
      "Epoch: [25/50], Step: [1400/1484], Loss: 0.0824\n",
      "train_acc: 97.3747\n",
      "test_acc: 97.4052\n",
      "Epoch: [26/50], Step: [100/1484], Loss: 0.0758\n",
      "Epoch: [26/50], Step: [200/1484], Loss: 0.0835\n",
      "Epoch: [26/50], Step: [300/1484], Loss: 0.0763\n",
      "Epoch: [26/50], Step: [400/1484], Loss: 0.0822\n",
      "Epoch: [26/50], Step: [500/1484], Loss: 0.0745\n",
      "Epoch: [26/50], Step: [600/1484], Loss: 0.0766\n",
      "Epoch: [26/50], Step: [700/1484], Loss: 0.0770\n",
      "Epoch: [26/50], Step: [800/1484], Loss: 0.0810\n",
      "Epoch: [26/50], Step: [900/1484], Loss: 0.0813\n",
      "Epoch: [26/50], Step: [1000/1484], Loss: 0.0799\n",
      "Epoch: [26/50], Step: [1100/1484], Loss: 0.0723\n",
      "Epoch: [26/50], Step: [1200/1484], Loss: 0.0736\n",
      "Epoch: [26/50], Step: [1300/1484], Loss: 0.0769\n",
      "Epoch: [26/50], Step: [1400/1484], Loss: 0.0822\n",
      "train_acc: 97.3922\n",
      "test_acc: 97.4597\n",
      "Epoch: [27/50], Step: [100/1484], Loss: 0.0801\n",
      "Epoch: [27/50], Step: [200/1484], Loss: 0.0818\n",
      "Epoch: [27/50], Step: [300/1484], Loss: 0.0754\n",
      "Epoch: [27/50], Step: [400/1484], Loss: 0.0783\n",
      "Epoch: [27/50], Step: [500/1484], Loss: 0.0750\n",
      "Epoch: [27/50], Step: [600/1484], Loss: 0.0762\n",
      "Epoch: [27/50], Step: [700/1484], Loss: 0.0774\n",
      "Epoch: [27/50], Step: [800/1484], Loss: 0.0803\n",
      "Epoch: [27/50], Step: [900/1484], Loss: 0.0781\n",
      "Epoch: [27/50], Step: [1000/1484], Loss: 0.0839\n",
      "Epoch: [27/50], Step: [1100/1484], Loss: 0.0726\n",
      "Epoch: [27/50], Step: [1200/1484], Loss: 0.0737\n",
      "Epoch: [27/50], Step: [1300/1484], Loss: 0.0696\n",
      "Epoch: [27/50], Step: [1400/1484], Loss: 0.0828\n",
      "train_acc: 97.4488\n",
      "test_acc: 97.4589\n",
      "Epoch: [28/50], Step: [100/1484], Loss: 0.0815\n",
      "Epoch: [28/50], Step: [200/1484], Loss: 0.0695\n",
      "Epoch: [28/50], Step: [300/1484], Loss: 0.0738\n",
      "Epoch: [28/50], Step: [400/1484], Loss: 0.0736\n",
      "Epoch: [28/50], Step: [500/1484], Loss: 0.0743\n",
      "Epoch: [28/50], Step: [600/1484], Loss: 0.0970\n",
      "Epoch: [28/50], Step: [700/1484], Loss: 0.0748\n",
      "Epoch: [28/50], Step: [800/1484], Loss: 0.0723\n",
      "Epoch: [28/50], Step: [900/1484], Loss: 0.0803\n",
      "Epoch: [28/50], Step: [1000/1484], Loss: 0.0734\n",
      "Epoch: [28/50], Step: [1100/1484], Loss: 0.0755\n",
      "Epoch: [28/50], Step: [1200/1484], Loss: 0.0803\n",
      "Epoch: [28/50], Step: [1300/1484], Loss: 0.0797\n",
      "Epoch: [28/50], Step: [1400/1484], Loss: 0.0845\n",
      "train_acc: 97.5950\n",
      "test_acc: 97.6215\n",
      "Epoch: [29/50], Step: [100/1484], Loss: 0.0780\n",
      "Epoch: [29/50], Step: [200/1484], Loss: 0.0798\n",
      "Epoch: [29/50], Step: [300/1484], Loss: 0.0829\n",
      "Epoch: [29/50], Step: [400/1484], Loss: 0.0775\n",
      "Epoch: [29/50], Step: [500/1484], Loss: 0.0742\n",
      "Epoch: [29/50], Step: [600/1484], Loss: 0.0779\n",
      "Epoch: [29/50], Step: [700/1484], Loss: 0.0757\n",
      "Epoch: [29/50], Step: [800/1484], Loss: 0.0815\n",
      "Epoch: [29/50], Step: [900/1484], Loss: 0.0692\n",
      "Epoch: [29/50], Step: [1000/1484], Loss: 0.0805\n",
      "Epoch: [29/50], Step: [1100/1484], Loss: 0.0767\n",
      "Epoch: [29/50], Step: [1200/1484], Loss: 0.0783\n",
      "Epoch: [29/50], Step: [1300/1484], Loss: 0.0741\n",
      "Epoch: [29/50], Step: [1400/1484], Loss: 0.0766\n",
      "train_acc: 97.7628\n",
      "test_acc: 97.7793\n",
      "Epoch: [30/50], Step: [100/1484], Loss: 0.0743\n",
      "Epoch: [30/50], Step: [200/1484], Loss: 0.0765\n",
      "Epoch: [30/50], Step: [300/1484], Loss: 0.0752\n",
      "Epoch: [30/50], Step: [400/1484], Loss: 0.0771\n",
      "Epoch: [30/50], Step: [500/1484], Loss: 0.0762\n",
      "Epoch: [30/50], Step: [600/1484], Loss: 0.0804\n",
      "Epoch: [30/50], Step: [700/1484], Loss: 0.0776\n",
      "Epoch: [30/50], Step: [800/1484], Loss: 0.0815\n",
      "Epoch: [30/50], Step: [900/1484], Loss: 0.0800\n",
      "Epoch: [30/50], Step: [1000/1484], Loss: 0.0756\n",
      "Epoch: [30/50], Step: [1100/1484], Loss: 0.0746\n",
      "Epoch: [30/50], Step: [1200/1484], Loss: 0.0699\n",
      "Epoch: [30/50], Step: [1300/1484], Loss: 0.0776\n",
      "Epoch: [30/50], Step: [1400/1484], Loss: 0.0740\n",
      "train_acc: 97.4636\n",
      "test_acc: 97.4586\n",
      "Epoch: [31/50], Step: [100/1484], Loss: 0.0725\n",
      "Epoch: [31/50], Step: [200/1484], Loss: 0.0767\n",
      "Epoch: [31/50], Step: [300/1484], Loss: 0.0731\n",
      "Epoch: [31/50], Step: [400/1484], Loss: 0.0771\n",
      "Epoch: [31/50], Step: [500/1484], Loss: 0.0695\n",
      "Epoch: [31/50], Step: [600/1484], Loss: 0.0692\n",
      "Epoch: [31/50], Step: [700/1484], Loss: 0.0777\n",
      "Epoch: [31/50], Step: [800/1484], Loss: 0.0770\n",
      "Epoch: [31/50], Step: [900/1484], Loss: 0.0752\n",
      "Epoch: [31/50], Step: [1000/1484], Loss: 0.0802\n",
      "Epoch: [31/50], Step: [1100/1484], Loss: 0.0841\n",
      "Epoch: [31/50], Step: [1200/1484], Loss: 0.0776\n",
      "Epoch: [31/50], Step: [1300/1484], Loss: 0.0710\n",
      "Epoch: [31/50], Step: [1400/1484], Loss: 0.0770\n",
      "train_acc: 97.8814\n",
      "test_acc: 97.8794\n",
      "Epoch: [32/50], Step: [100/1484], Loss: 0.0737\n",
      "Epoch: [32/50], Step: [200/1484], Loss: 0.0772\n",
      "Epoch: [32/50], Step: [300/1484], Loss: 0.0683\n",
      "Epoch: [32/50], Step: [400/1484], Loss: 0.0791\n",
      "Epoch: [32/50], Step: [500/1484], Loss: 0.0799\n",
      "Epoch: [32/50], Step: [600/1484], Loss: 0.0689\n",
      "Epoch: [32/50], Step: [700/1484], Loss: 0.0711\n",
      "Epoch: [32/50], Step: [800/1484], Loss: 0.0744\n",
      "Epoch: [32/50], Step: [900/1484], Loss: 0.0719\n",
      "Epoch: [32/50], Step: [1000/1484], Loss: 0.0847\n",
      "Epoch: [32/50], Step: [1100/1484], Loss: 0.0719\n",
      "Epoch: [32/50], Step: [1200/1484], Loss: 0.0791\n",
      "Epoch: [32/50], Step: [1300/1484], Loss: 0.0787\n",
      "Epoch: [32/50], Step: [1400/1484], Loss: 0.0791\n",
      "train_acc: 97.7534\n",
      "test_acc: 97.8095\n",
      "Epoch: [33/50], Step: [100/1484], Loss: 0.0799\n",
      "Epoch: [33/50], Step: [200/1484], Loss: 0.0763\n",
      "Epoch: [33/50], Step: [300/1484], Loss: 0.0772\n",
      "Epoch: [33/50], Step: [400/1484], Loss: 0.0756\n",
      "Epoch: [33/50], Step: [500/1484], Loss: 0.0788\n",
      "Epoch: [33/50], Step: [600/1484], Loss: 0.0636\n",
      "Epoch: [33/50], Step: [700/1484], Loss: 0.0716\n",
      "Epoch: [33/50], Step: [800/1484], Loss: 0.0744\n",
      "Epoch: [33/50], Step: [900/1484], Loss: 0.0811\n",
      "Epoch: [33/50], Step: [1000/1484], Loss: 0.0708\n",
      "Epoch: [33/50], Step: [1100/1484], Loss: 0.0799\n",
      "Epoch: [33/50], Step: [1200/1484], Loss: 0.0719\n",
      "Epoch: [33/50], Step: [1300/1484], Loss: 0.0754\n",
      "Epoch: [33/50], Step: [1400/1484], Loss: 0.0802\n",
      "train_acc: 97.4892\n",
      "test_acc: 97.4853\n",
      "Epoch: [34/50], Step: [100/1484], Loss: 0.0684\n",
      "Epoch: [34/50], Step: [200/1484], Loss: 0.0768\n",
      "Epoch: [34/50], Step: [300/1484], Loss: 0.0751\n",
      "Epoch: [34/50], Step: [400/1484], Loss: 0.0728\n",
      "Epoch: [34/50], Step: [500/1484], Loss: 0.0791\n",
      "Epoch: [34/50], Step: [600/1484], Loss: 0.0757\n",
      "Epoch: [34/50], Step: [700/1484], Loss: 0.0733\n",
      "Epoch: [34/50], Step: [800/1484], Loss: 0.0785\n",
      "Epoch: [34/50], Step: [900/1484], Loss: 0.0819\n",
      "Epoch: [34/50], Step: [1000/1484], Loss: 0.0727\n",
      "Epoch: [34/50], Step: [1100/1484], Loss: 0.0832\n",
      "Epoch: [34/50], Step: [1200/1484], Loss: 0.0805\n",
      "Epoch: [34/50], Step: [1300/1484], Loss: 0.0713\n",
      "Epoch: [34/50], Step: [1400/1484], Loss: 0.0698\n",
      "train_acc: 97.8363\n",
      "test_acc: 97.8826\n",
      "Epoch: [35/50], Step: [100/1484], Loss: 0.0707\n",
      "Epoch: [35/50], Step: [200/1484], Loss: 0.0655\n",
      "Epoch: [35/50], Step: [300/1484], Loss: 0.0739\n",
      "Epoch: [35/50], Step: [400/1484], Loss: 0.0742\n",
      "Epoch: [35/50], Step: [500/1484], Loss: 0.0716\n",
      "Epoch: [35/50], Step: [600/1484], Loss: 0.0811\n",
      "Epoch: [35/50], Step: [700/1484], Loss: 0.0768\n",
      "Epoch: [35/50], Step: [800/1484], Loss: 0.0683\n",
      "Epoch: [35/50], Step: [900/1484], Loss: 0.0730\n",
      "Epoch: [35/50], Step: [1000/1484], Loss: 0.0758\n",
      "Epoch: [35/50], Step: [1100/1484], Loss: 0.0692\n",
      "Epoch: [35/50], Step: [1200/1484], Loss: 0.0781\n",
      "Epoch: [35/50], Step: [1300/1484], Loss: 0.0735\n",
      "Epoch: [35/50], Step: [1400/1484], Loss: 0.0704\n",
      "train_acc: 97.7783\n",
      "test_acc: 97.8108\n",
      "Epoch: [36/50], Step: [100/1484], Loss: 0.0753\n",
      "Epoch: [36/50], Step: [200/1484], Loss: 0.0739\n",
      "Epoch: [36/50], Step: [300/1484], Loss: 0.0712\n",
      "Epoch: [36/50], Step: [400/1484], Loss: 0.0704\n",
      "Epoch: [36/50], Step: [500/1484], Loss: 0.0675\n",
      "Epoch: [36/50], Step: [600/1484], Loss: 0.0761\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: [36/50], Step: [700/1484], Loss: 0.0726\n",
      "Epoch: [36/50], Step: [800/1484], Loss: 0.0779\n",
      "Epoch: [36/50], Step: [900/1484], Loss: 0.0693\n",
      "Epoch: [36/50], Step: [1000/1484], Loss: 0.0700\n",
      "Epoch: [36/50], Step: [1100/1484], Loss: 0.0699\n",
      "Epoch: [36/50], Step: [1200/1484], Loss: 0.0808\n",
      "Epoch: [36/50], Step: [1300/1484], Loss: 0.0727\n",
      "Epoch: [36/50], Step: [1400/1484], Loss: 0.0743\n",
      "train_acc: 96.6139\n",
      "test_acc: 96.5670\n",
      "Epoch: [37/50], Step: [100/1484], Loss: 0.0758\n",
      "Epoch: [37/50], Step: [200/1484], Loss: 0.0685\n",
      "Epoch: [37/50], Step: [300/1484], Loss: 0.0757\n",
      "Epoch: [37/50], Step: [400/1484], Loss: 0.0659\n",
      "Epoch: [37/50], Step: [500/1484], Loss: 0.0773\n",
      "Epoch: [37/50], Step: [600/1484], Loss: 0.0733\n",
      "Epoch: [37/50], Step: [700/1484], Loss: 0.0670\n",
      "Epoch: [37/50], Step: [800/1484], Loss: 0.0742\n",
      "Epoch: [37/50], Step: [900/1484], Loss: 0.0709\n",
      "Epoch: [37/50], Step: [1000/1484], Loss: 0.0704\n",
      "Epoch: [37/50], Step: [1100/1484], Loss: 0.0764\n",
      "Epoch: [37/50], Step: [1200/1484], Loss: 0.0733\n",
      "Epoch: [37/50], Step: [1300/1484], Loss: 0.0757\n",
      "Epoch: [37/50], Step: [1400/1484], Loss: 0.0752\n",
      "train_acc: 97.9724\n",
      "test_acc: 97.9651\n",
      "Epoch: [38/50], Step: [100/1484], Loss: 0.0647\n",
      "Epoch: [38/50], Step: [200/1484], Loss: 0.0602\n",
      "Epoch: [38/50], Step: [300/1484], Loss: 0.0781\n",
      "Epoch: [38/50], Step: [400/1484], Loss: 0.0803\n",
      "Epoch: [38/50], Step: [500/1484], Loss: 0.0684\n",
      "Epoch: [38/50], Step: [600/1484], Loss: 0.0715\n",
      "Epoch: [38/50], Step: [700/1484], Loss: 0.0713\n",
      "Epoch: [38/50], Step: [800/1484], Loss: 0.0767\n",
      "Epoch: [38/50], Step: [900/1484], Loss: 0.0746\n",
      "Epoch: [38/50], Step: [1000/1484], Loss: 0.0753\n",
      "Epoch: [38/50], Step: [1100/1484], Loss: 0.0768\n",
      "Epoch: [38/50], Step: [1200/1484], Loss: 0.0693\n",
      "Epoch: [38/50], Step: [1300/1484], Loss: 0.0785\n",
      "Epoch: [38/50], Step: [1400/1484], Loss: 0.0704\n",
      "train_acc: 97.9353\n",
      "test_acc: 97.9431\n",
      "Epoch: [39/50], Step: [100/1484], Loss: 0.0713\n",
      "Epoch: [39/50], Step: [200/1484], Loss: 0.0700\n",
      "Epoch: [39/50], Step: [300/1484], Loss: 0.0660\n",
      "Epoch: [39/50], Step: [400/1484], Loss: 0.0722\n",
      "Epoch: [39/50], Step: [500/1484], Loss: 0.0832\n",
      "Epoch: [39/50], Step: [600/1484], Loss: 0.0726\n",
      "Epoch: [39/50], Step: [700/1484], Loss: 0.0737\n",
      "Epoch: [39/50], Step: [800/1484], Loss: 0.0730\n",
      "Epoch: [39/50], Step: [900/1484], Loss: 0.0700\n",
      "Epoch: [39/50], Step: [1000/1484], Loss: 0.0716\n",
      "Epoch: [39/50], Step: [1100/1484], Loss: 0.0751\n",
      "Epoch: [39/50], Step: [1200/1484], Loss: 0.0737\n",
      "Epoch: [39/50], Step: [1300/1484], Loss: 0.0750\n",
      "Epoch: [39/50], Step: [1400/1484], Loss: 0.0717\n",
      "train_acc: 97.6826\n",
      "test_acc: 97.7001\n",
      "Epoch: [40/50], Step: [100/1484], Loss: 0.0681\n",
      "Epoch: [40/50], Step: [200/1484], Loss: 0.0858\n",
      "Epoch: [40/50], Step: [300/1484], Loss: 0.0693\n",
      "Epoch: [40/50], Step: [400/1484], Loss: 0.0707\n",
      "Epoch: [40/50], Step: [500/1484], Loss: 0.0764\n",
      "Epoch: [40/50], Step: [600/1484], Loss: 0.0805\n",
      "Epoch: [40/50], Step: [700/1484], Loss: 0.0636\n",
      "Epoch: [40/50], Step: [800/1484], Loss: 0.0694\n",
      "Epoch: [40/50], Step: [900/1484], Loss: 0.0688\n",
      "Epoch: [40/50], Step: [1000/1484], Loss: 0.0708\n",
      "Epoch: [40/50], Step: [1100/1484], Loss: 0.0669\n",
      "Epoch: [40/50], Step: [1200/1484], Loss: 0.0721\n",
      "Epoch: [40/50], Step: [1300/1484], Loss: 0.0720\n",
      "Epoch: [40/50], Step: [1400/1484], Loss: 0.0723\n",
      "train_acc: 97.9582\n",
      "test_acc: 97.9324\n",
      "Epoch: [41/50], Step: [100/1484], Loss: 0.0647\n",
      "Epoch: [41/50], Step: [200/1484], Loss: 0.0655\n",
      "Epoch: [41/50], Step: [300/1484], Loss: 0.0724\n",
      "Epoch: [41/50], Step: [400/1484], Loss: 0.0692\n",
      "Epoch: [41/50], Step: [500/1484], Loss: 0.0791\n",
      "Epoch: [41/50], Step: [600/1484], Loss: 0.0688\n",
      "Epoch: [41/50], Step: [700/1484], Loss: 0.0751\n",
      "Epoch: [41/50], Step: [800/1484], Loss: 0.0742\n",
      "Epoch: [41/50], Step: [900/1484], Loss: 0.0733\n",
      "Epoch: [41/50], Step: [1000/1484], Loss: 0.0773\n",
      "Epoch: [41/50], Step: [1100/1484], Loss: 0.0801\n",
      "Epoch: [41/50], Step: [1200/1484], Loss: 0.0663\n",
      "Epoch: [41/50], Step: [1300/1484], Loss: 0.0750\n",
      "Epoch: [41/50], Step: [1400/1484], Loss: 0.0681\n",
      "train_acc: 97.3922\n",
      "test_acc: 97.3725\n",
      "Epoch: [42/50], Step: [100/1484], Loss: 0.0730\n",
      "Epoch: [42/50], Step: [200/1484], Loss: 0.0669\n",
      "Epoch: [42/50], Step: [300/1484], Loss: 0.0750\n",
      "Epoch: [42/50], Step: [400/1484], Loss: 0.0648\n",
      "Epoch: [42/50], Step: [500/1484], Loss: 0.0639\n",
      "Epoch: [42/50], Step: [600/1484], Loss: 0.0701\n",
      "Epoch: [42/50], Step: [700/1484], Loss: 0.0769\n",
      "Epoch: [42/50], Step: [800/1484], Loss: 0.0726\n",
      "Epoch: [42/50], Step: [900/1484], Loss: 0.0730\n",
      "Epoch: [42/50], Step: [1000/1484], Loss: 0.0704\n",
      "Epoch: [42/50], Step: [1100/1484], Loss: 0.0687\n",
      "Epoch: [42/50], Step: [1200/1484], Loss: 0.0767\n",
      "Epoch: [42/50], Step: [1300/1484], Loss: 0.0699\n",
      "Epoch: [42/50], Step: [1400/1484], Loss: 0.0715\n",
      "train_acc: 97.7628\n",
      "test_acc: 97.7355\n",
      "Epoch: [43/50], Step: [100/1484], Loss: 0.0714\n",
      "Epoch: [43/50], Step: [200/1484], Loss: 0.0696\n",
      "Epoch: [43/50], Step: [300/1484], Loss: 0.0722\n",
      "Epoch: [43/50], Step: [400/1484], Loss: 0.0658\n",
      "Epoch: [43/50], Step: [500/1484], Loss: 0.0784\n",
      "Epoch: [43/50], Step: [600/1484], Loss: 0.0685\n",
      "Epoch: [43/50], Step: [700/1484], Loss: 0.0736\n",
      "Epoch: [43/50], Step: [800/1484], Loss: 0.0646\n",
      "Epoch: [43/50], Step: [900/1484], Loss: 0.0641\n",
      "Epoch: [43/50], Step: [1000/1484], Loss: 0.0789\n",
      "Epoch: [43/50], Step: [1100/1484], Loss: 0.0697\n",
      "Epoch: [43/50], Step: [1200/1484], Loss: 0.0724\n",
      "Epoch: [43/50], Step: [1300/1484], Loss: 0.0644\n",
      "Epoch: [43/50], Step: [1400/1484], Loss: 0.0736\n",
      "train_acc: 97.9111\n",
      "test_acc: 97.8888\n",
      "Epoch: [44/50], Step: [100/1484], Loss: 0.0706\n",
      "Epoch: [44/50], Step: [200/1484], Loss: 0.0687\n",
      "Epoch: [44/50], Step: [300/1484], Loss: 0.0710\n",
      "Epoch: [44/50], Step: [400/1484], Loss: 0.0708\n",
      "Epoch: [44/50], Step: [500/1484], Loss: 0.0665\n",
      "Epoch: [44/50], Step: [600/1484], Loss: 0.0696\n",
      "Epoch: [44/50], Step: [700/1484], Loss: 0.0690\n",
      "Epoch: [44/50], Step: [800/1484], Loss: 0.0687\n",
      "Epoch: [44/50], Step: [900/1484], Loss: 0.0683\n",
      "Epoch: [44/50], Step: [1000/1484], Loss: 0.0712\n",
      "Epoch: [44/50], Step: [1100/1484], Loss: 0.0685\n",
      "Epoch: [44/50], Step: [1200/1484], Loss: 0.0753\n",
      "Epoch: [44/50], Step: [1300/1484], Loss: 0.0728\n",
      "Epoch: [44/50], Step: [1400/1484], Loss: 0.0707\n",
      "train_acc: 97.9865\n",
      "test_acc: 98.0010\n",
      "Epoch: [45/50], Step: [100/1484], Loss: 0.0716\n",
      "Epoch: [45/50], Step: [200/1484], Loss: 0.0676\n",
      "Epoch: [45/50], Step: [300/1484], Loss: 0.0664\n",
      "Epoch: [45/50], Step: [400/1484], Loss: 0.0779\n",
      "Epoch: [45/50], Step: [500/1484], Loss: 0.0755\n",
      "Epoch: [45/50], Step: [600/1484], Loss: 0.0702\n",
      "Epoch: [45/50], Step: [700/1484], Loss: 0.0715\n",
      "Epoch: [45/50], Step: [800/1484], Loss: 0.0651\n",
      "Epoch: [45/50], Step: [900/1484], Loss: 0.0659\n",
      "Epoch: [45/50], Step: [1000/1484], Loss: 0.0707\n",
      "Epoch: [45/50], Step: [1100/1484], Loss: 0.0634\n",
      "Epoch: [45/50], Step: [1200/1484], Loss: 0.0684\n",
      "Epoch: [45/50], Step: [1300/1484], Loss: 0.0700\n",
      "Epoch: [45/50], Step: [1400/1484], Loss: 0.0665\n",
      "train_acc: 97.8733\n",
      "test_acc: 97.8698\n",
      "Epoch: [46/50], Step: [100/1484], Loss: 0.0663\n",
      "Epoch: [46/50], Step: [200/1484], Loss: 0.0705\n",
      "Epoch: [46/50], Step: [300/1484], Loss: 0.0705\n",
      "Epoch: [46/50], Step: [400/1484], Loss: 0.0674\n",
      "Epoch: [46/50], Step: [500/1484], Loss: 0.0726\n",
      "Epoch: [46/50], Step: [600/1484], Loss: 0.0729\n",
      "Epoch: [46/50], Step: [700/1484], Loss: 0.0653\n",
      "Epoch: [46/50], Step: [800/1484], Loss: 0.0661\n",
      "Epoch: [46/50], Step: [900/1484], Loss: 0.0693\n",
      "Epoch: [46/50], Step: [1000/1484], Loss: 0.0662\n",
      "Epoch: [46/50], Step: [1100/1484], Loss: 0.0701\n",
      "Epoch: [46/50], Step: [1200/1484], Loss: 0.0650\n",
      "Epoch: [46/50], Step: [1300/1484], Loss: 0.0735\n",
      "Epoch: [46/50], Step: [1400/1484], Loss: 0.0649\n",
      "train_acc: 98.0485\n",
      "test_acc: 98.0659\n",
      "Epoch: [47/50], Step: [100/1484], Loss: 0.0724\n",
      "Epoch: [47/50], Step: [200/1484], Loss: 0.0655\n",
      "Epoch: [47/50], Step: [300/1484], Loss: 0.0689\n",
      "Epoch: [47/50], Step: [400/1484], Loss: 0.0716\n",
      "Epoch: [47/50], Step: [500/1484], Loss: 0.0713\n",
      "Epoch: [47/50], Step: [600/1484], Loss: 0.0676\n",
      "Epoch: [47/50], Step: [700/1484], Loss: 0.0710\n",
      "Epoch: [47/50], Step: [800/1484], Loss: 0.0748\n",
      "Epoch: [47/50], Step: [900/1484], Loss: 0.0686\n",
      "Epoch: [47/50], Step: [1000/1484], Loss: 0.0702\n",
      "Epoch: [47/50], Step: [1100/1484], Loss: 0.0662\n",
      "Epoch: [47/50], Step: [1200/1484], Loss: 0.0674\n",
      "Epoch: [47/50], Step: [1300/1484], Loss: 0.0672\n",
      "Epoch: [47/50], Step: [1400/1484], Loss: 0.0666\n",
      "train_acc: 97.9616\n",
      "test_acc: 97.9493\n",
      "Epoch: [48/50], Step: [100/1484], Loss: 0.0638\n",
      "Epoch: [48/50], Step: [200/1484], Loss: 0.0675\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: [48/50], Step: [300/1484], Loss: 0.0732\n",
      "Epoch: [48/50], Step: [400/1484], Loss: 0.0684\n",
      "Epoch: [48/50], Step: [500/1484], Loss: 0.0719\n",
      "Epoch: [48/50], Step: [600/1484], Loss: 0.0662\n",
      "Epoch: [48/50], Step: [700/1484], Loss: 0.0628\n",
      "Epoch: [48/50], Step: [800/1484], Loss: 0.0656\n",
      "Epoch: [48/50], Step: [900/1484], Loss: 0.0681\n",
      "Epoch: [48/50], Step: [1000/1484], Loss: 0.0728\n",
      "Epoch: [48/50], Step: [1100/1484], Loss: 0.0655\n",
      "Epoch: [48/50], Step: [1200/1484], Loss: 0.0717\n",
      "Epoch: [48/50], Step: [1300/1484], Loss: 0.0759\n",
      "Epoch: [48/50], Step: [1400/1484], Loss: 0.0680\n",
      "train_acc: 97.6786\n",
      "test_acc: 97.6752\n",
      "Epoch: [49/50], Step: [100/1484], Loss: 0.0650\n",
      "Epoch: [49/50], Step: [200/1484], Loss: 0.0635\n",
      "Epoch: [49/50], Step: [300/1484], Loss: 0.0682\n",
      "Epoch: [49/50], Step: [400/1484], Loss: 0.0675\n",
      "Epoch: [49/50], Step: [500/1484], Loss: 0.0647\n",
      "Epoch: [49/50], Step: [600/1484], Loss: 0.0715\n",
      "Epoch: [49/50], Step: [700/1484], Loss: 0.0682\n",
      "Epoch: [49/50], Step: [800/1484], Loss: 0.0646\n",
      "Epoch: [49/50], Step: [900/1484], Loss: 0.0704\n",
      "Epoch: [49/50], Step: [1000/1484], Loss: 0.0696\n",
      "Epoch: [49/50], Step: [1100/1484], Loss: 0.0634\n",
      "Epoch: [49/50], Step: [1200/1484], Loss: 0.0676\n",
      "Epoch: [49/50], Step: [1300/1484], Loss: 0.0667\n",
      "Epoch: [49/50], Step: [1400/1484], Loss: 0.0631\n",
      "train_acc: 98.1078\n",
      "test_acc: 98.1146\n",
      "Epoch: [50/50], Step: [100/1484], Loss: 0.0678\n",
      "Epoch: [50/50], Step: [200/1484], Loss: 0.0777\n",
      "Epoch: [50/50], Step: [300/1484], Loss: 0.0625\n",
      "Epoch: [50/50], Step: [400/1484], Loss: 0.0633\n",
      "Epoch: [50/50], Step: [500/1484], Loss: 0.0674\n",
      "Epoch: [50/50], Step: [600/1484], Loss: 0.0668\n",
      "Epoch: [50/50], Step: [700/1484], Loss: 0.0678\n",
      "Epoch: [50/50], Step: [800/1484], Loss: 0.0653\n",
      "Epoch: [50/50], Step: [900/1484], Loss: 0.0685\n",
      "Epoch: [50/50], Step: [1000/1484], Loss: 0.0679\n",
      "Epoch: [50/50], Step: [1100/1484], Loss: 0.0732\n",
      "Epoch: [50/50], Step: [1200/1484], Loss: 0.0699\n",
      "Epoch: [50/50], Step: [1300/1484], Loss: 0.0596\n",
      "Epoch: [50/50], Step: [1400/1484], Loss: 0.0736\n",
      "train_acc: 97.7163\n",
      "test_acc: 97.6828\n"
     ]
    }
   ],
   "source": [
    "# Train the model \n",
    "\n",
    "total_step = len(trainloader)\n",
    "for epoch in range(num_epochs):\n",
    "    running_loss = 0.0\n",
    "    for i, (x, y) in enumerate(trainloader):\n",
    "        # Move tensors to the configured device\n",
    "        x = x.to(device)\n",
    "        y = y.to(device)\n",
    "        \n",
    "        #print(\"batch_x: \", type(x), x.size())\n",
    "        #print(\"batch_y: \", type(y), y.size())\n",
    "        #print(x)\n",
    "        #print(y)\n",
    "        \n",
    "        # Forward pass\n",
    "        outputs = model(x)\n",
    "        #print(outputs)\n",
    "        loss = criterion(outputs,y)\n",
    "        \n",
    "        \n",
    "        # Backward and optimize\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        running_loss += loss.item() \n",
    "        if (i+1) % 100 == 0:\n",
    "            print('Epoch: [{}/{}], Step: [{}/{}], Loss: {:.4f}'\n",
    "                  .format(epoch+1, num_epochs, i+1, total_step, running_loss/100))\n",
    "            running_loss = 0.0\n",
    "        lr_scheduler.step()\n",
    "    print(\"train_acc: {:.4f}\".format(test(trainloader)))\n",
    "    print(\"test_acc: {:.4f}\".format(test(testloader)))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
