{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "a10_save.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "machine_shape": "hm"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "kAZaRbZ3bt5a",
        "colab_type": "code",
        "outputId": "60a1dea5-d836-44f4-d9b6-a1a4fbf27767",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 90
        }
      },
      "source": [
        "# Imports (deduplicated: `time` and `google.colab.drive` were imported twice).\n",
        "import torch\n",
        "from torchvision import transforms, datasets\n",
        "import numpy as np\n",
        "import timeit\n",
        "from collections import OrderedDict\n",
        "from pprint import pformat\n",
        "from torch.utils.data.sampler import *\n",
        "from tqdm import tqdm\n",
        "import time\n",
        "from torch.utils import data\n",
        "import torch.utils.data as utils\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "import os\n",
        "\n",
        "# Mount Google Drive so the dataset and checkpoints are accessible.\n",
        "from google.colab import drive\n",
        "drive.mount('/content/drive')\n",
        "\n",
        "# Use the GPU when available (and enabled); otherwise fall back to CPU.\n",
        "use_cuda = True\n",
        "print(\"CUDA Available: \", torch.cuda.is_available())\n",
        "device = torch.device(\"cuda\" if (use_cuda and torch.cuda.is_available()) else \"cpu\")\n",
        "\n",
        "# Training hyper-parameters shared by the cells below.\n",
        "batch_size = 100\n",
        "learning_rate = 0.0006\n",
        "weight_decay = 0.00001\n",
        "n_epochs = 80\n",
        "log_interval = 100\n",
        "\n",
        "# Fixed seed for reproducible weight initialization / shuffling.\n",
        "random_seed = 1\n",
        "torch.manual_seed(random_seed)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n",
            "CUDA Available:  True\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "<torch._C.Generator at 0x7f8bd09957b0>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 27
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QeuoFkydHZzJ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def load_data():\n",
        "    \"\"\"Load the dataset from Drive and wrap it in DataLoaders.\n",
        "\n",
        "    Returns:\n",
        "        (train_loader, valid_loader, bbox_loader, bbox_loader_v):\n",
        "        classification loaders yielding (image, label) and bounding-box\n",
        "        loaders yielding (image, box coords), for train / validation.\n",
        "    \"\"\"\n",
        "    root_dir = '/content/drive/My Drive/Colab Notebooks/a10'\n",
        "    # Scale pixels to [0, 1] and add a channel axis: (N, 64, 64, 1).\n",
        "    # -1 infers the sample count instead of hard-coding 55000 / 5000.\n",
        "    x_train = np.load(os.path.join(root_dir, 'train_X.npy')).astype('float32') / 255\n",
        "    x_train = np.reshape(x_train, [-1, 64, 64, 1])\n",
        "    y_train = np.load(os.path.join(root_dir, 'train_Y.npy'))\n",
        "    bbox_train = np.load(os.path.join(root_dir, 'train_bboxes.npy'))  # (N, 2, 4)\n",
        "    x_valid = np.load(os.path.join(root_dir, 'valid_X.npy')).astype('float32') / 255\n",
        "    x_valid = np.reshape(x_valid, [-1, 64, 64, 1])\n",
        "    y_valid = np.load(os.path.join(root_dir, 'valid_Y.npy'))\n",
        "    bbox_valid = np.load(os.path.join(root_dir, 'valid_bboxes.npy'))\n",
        "\n",
        "    # Tensors stay on the CPU here; batches are moved to `device` inside\n",
        "    # the training loops.\n",
        "    x_train = torch.tensor(x_train, dtype=torch.float32)\n",
        "    y_train = torch.tensor(y_train, dtype=torch.long)\n",
        "    bbox_train = torch.tensor(bbox_train, dtype=torch.float32)\n",
        "    # Flatten the two 4-value boxes into one 8-value regression target.\n",
        "    bbox_train = bbox_train.view(bbox_train.size(0), -1)\n",
        "    x_valid = torch.tensor(x_valid, dtype=torch.float32)\n",
        "    y_valid = torch.tensor(y_valid, dtype=torch.long)\n",
        "    bbox_valid = torch.tensor(bbox_valid, dtype=torch.float32)\n",
        "    # NOTE(review): bbox_valid is NOT flattened like bbox_train — verify\n",
        "    # downstream consumers expect the (N, 2, 4) shape.\n",
        "\n",
        "    train_dataset = utils.TensorDataset(x_train, y_train)\n",
        "    valid_dataset = utils.TensorDataset(x_valid, y_valid)\n",
        "    bbox_dataset = utils.TensorDataset(x_train, bbox_train)\n",
        "    bbox_dataset_v = utils.TensorDataset(x_valid, bbox_valid)\n",
        "\n",
        "    train_loader = utils.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
        "    valid_loader = utils.DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)\n",
        "    bbox_loader = utils.DataLoader(bbox_dataset, batch_size=batch_size, shuffle=True)\n",
        "    bbox_loader_v = utils.DataLoader(bbox_dataset_v, batch_size=batch_size, shuffle=True)\n",
        "\n",
        "    return train_loader, valid_loader, bbox_loader, bbox_loader_v\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "OmbH_tzq0JZy",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "'''\n",
        "net for box coor, some ideas\n",
        "1.conv net(AlexNet, or VGG)\n",
        "2. Attach new fully-connected “regression head” to the network, i.e linear output last layer\n",
        "'''\n",
        "class Box_Net(nn.Module):\n",
        "    \"\"\"VGG-style CNN regressing 8 values (2 bounding boxes x 4 coordinates).\"\"\"\n",
        "\n",
        "    def __init__(self):\n",
        "      super(Box_Net, self).__init__()\n",
        "\n",
        "        #'''formula for output size = (n+2p-filter)/s + 1, n=input img size, p=padding, s=stride,\n",
        "        #for max pool output size size = (n-f)/s +1'''\n",
        "      # Block 1: 1 -> 64 channels, three 3x3 same-padding convs.\n",
        "      # Kaiming-normal init matches the ReLU activations used in forward().\n",
        "      self.conv1 = nn.Conv2d(1, 64, kernel_size=3,padding=1)\n",
        "      self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3,padding=1)\n",
        "      self.conv1_3 = nn.Conv2d(64, 64, kernel_size=3,padding=1)         \n",
        "      nn.init.kaiming_normal_(self.conv1.weight)\n",
        "      nn.init.kaiming_normal_(self.conv1_2.weight)\n",
        "      nn.init.kaiming_normal_(self.conv1_3.weight)\n",
        "      self.conv1_bn = nn.BatchNorm2d(64)\n",
        "\n",
        "      # Block 2: 64 -> 128 channels.\n",
        "      self.conv2 = nn.Conv2d(64, 128, kernel_size=3,padding=1)\n",
        "      self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3,padding=1)\n",
        "      self.conv2_3 = nn.Conv2d(128, 128, kernel_size=3,padding=1)         \n",
        "      nn.init.kaiming_normal_(self.conv2.weight)\n",
        "      nn.init.kaiming_normal_(self.conv2_2.weight)\n",
        "      nn.init.kaiming_normal_(self.conv2_3.weight)\n",
        "      self.conv2_bn = nn.BatchNorm2d(128)\n",
        "\n",
        "      # Block 3: 128 -> 256 channels.\n",
        "      self.conv3 = nn.Conv2d(128, 256, kernel_size=3,padding=1)\n",
        "      self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3,padding=1)\n",
        "      self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3,padding=1)          \n",
        "      nn.init.kaiming_normal_(self.conv3.weight)\n",
        "      nn.init.kaiming_normal_(self.conv3_2.weight)\n",
        "      nn.init.kaiming_normal_(self.conv3_3.weight)\n",
        "      self.conv3_bn = nn.BatchNorm2d(256)\n",
        "\n",
        "      # Block 4: 256 -> 512 channels.\n",
        "      self.conv4 = nn.Conv2d(256, 512, kernel_size=3,padding=1)\n",
        "      self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3,padding=1)\n",
        "      self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3,padding=1)          \n",
        "      nn.init.kaiming_normal_(self.conv4.weight)\n",
        "      nn.init.kaiming_normal_(self.conv4_2.weight)\n",
        "      nn.init.kaiming_normal_(self.conv4_3.weight)\n",
        "      self.conv4_bn = nn.BatchNorm2d(512)\n",
        "\n",
        "      # Block 5: 512 -> 512 channels.\n",
        "      self.conv5 = nn.Conv2d(512, 512, kernel_size=3,padding=1)\n",
        "      self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3,padding=1)\n",
        "      self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3,padding=1)          \n",
        "      nn.init.kaiming_normal_(self.conv5.weight)\n",
        "      nn.init.kaiming_normal_(self.conv5_2.weight)\n",
        "      nn.init.kaiming_normal_(self.conv5_3.weight)\n",
        "      self.conv5_bn = nn.BatchNorm2d(512)\n",
        "      # NOTE(review): bn5_2 is registered but never used in forward();\n",
        "      # removing it would change the checkpoint's state_dict keys.\n",
        "      self.bn5_2 = nn.BatchNorm2d(512)\n",
        "\n",
        "      # A single pooling module reused after every block (5 pools total).\n",
        "      self.pool = nn.MaxPool2d(kernel_size=3, stride=2)\n",
        "      #output size before conv layer: 1*1*512\n",
        "      \n",
        "      # Regression head: 512 -> 512 -> 512 -> 8 output coordinates.\n",
        "      self.fc1 = nn.Linear(512 * 1 * 1, 512)\n",
        "      self.fc1_bn = nn.BatchNorm1d(512)\n",
        "      self.fc2 = nn.Linear(512, 512)\n",
        "      self.fc2_bn = nn.BatchNorm1d(512)\n",
        "      self.fc3 = nn.Linear(512, 512)\n",
        "      self.fc3_bn = nn.BatchNorm1d(512)\n",
        "      self.fc4 = nn.Linear(512, 8)\n",
        "      \n",
        "    \n",
        "    def forward(self, x):\n",
        "      \"\"\"Map an image batch to box coordinates.\n",
        "\n",
        "      Assumes x is (N, 1, 64, 64) — the training loop permutes the data to\n",
        "      channels-first before calling the net. Returns an (N, 8) tensor with\n",
        "      no final activation (raw linear output for regression).\n",
        "      \"\"\"\n",
        "\n",
        "      x = F.relu(self.conv1(x))\n",
        "      x = F.relu(self.conv1_2(x))\n",
        "      x = F.relu(self.conv1_bn(self.conv1_3(x)))\n",
        "      x = self.pool(x)\n",
        "\n",
        "      x = F.relu(self.conv2(x))\n",
        "      x = F.relu(self.conv2_2(x))\n",
        "      x = F.relu(self.conv2_bn(self.conv2_3(x)))\n",
        "      x = self.pool(x)\n",
        "\n",
        "      x = F.relu(self.conv3(x))\n",
        "      x = F.relu(self.conv3_2(x))\n",
        "      x = F.relu(self.conv3_bn(self.conv3_3(x)))\n",
        "      x = self.pool(x)\n",
        "\n",
        "      x = F.relu(self.conv4(x))\n",
        "      x = F.relu(self.conv4_2(x))\n",
        "      x = F.relu(self.conv4_bn(self.conv4_3(x)))\n",
        "      x = self.pool(x)\n",
        "\n",
        "      x = F.relu(self.conv5(x))\n",
        "      x = F.relu(self.conv5_2(x))\n",
        "      x = F.relu(self.conv5_bn(self.conv5_3(x)))\n",
        "      x = self.pool(x)\n",
        "\n",
        "      # Flatten (N, 512, 1, 1) for the fully-connected regression head.\n",
        "      x = x.view(-1, 512 * 1 * 1) \n",
        "      x = F.relu(self.fc1_bn(self.fc1(x)))\n",
        "      x = F.relu(self.fc2_bn(self.fc2(x)))\n",
        "      x = F.relu(self.fc3_bn(self.fc3(x)))\n",
        "      x = self.fc4(x)\n",
        "\n",
        "      return x\n",
        "      "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "CYBZI2Wu8BKS",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "#2 classes output,\n",
        "#y label should convert to one-hot\n",
        "# try classic cnn-architectures like alex, vgg\n",
        "#VGG net, (conv, conv, maxpool)*2 -> (conv,conv,conv, pool)*3 -> fc*3\n",
        "# batch norm usually after conv or fc, and before activation func\n",
        "class Cla_Net(nn.Module):\n",
        "  \"\"\"VGG-style CNN classifier with 20 outputs (presumably 2 digits x 10 classes).\"\"\"\n",
        "  \n",
        "  def __init__(self):\n",
        "    super(Cla_Net, self).__init__()\n",
        "        \n",
        "    # Block 1: 1 -> 64 channels; the second conv uses padding=0, so each\n",
        "    # block shrinks the spatial size slightly before pooling.\n",
        "    self.conv1 = nn.Conv2d(1, 64, kernel_size=3,padding=1)\n",
        "    self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3,padding=0)         \n",
        "    nn.init.kaiming_normal_(self.conv1.weight)\n",
        "    nn.init.kaiming_normal_(self.conv1_2.weight)\n",
        "    self.conv1_bn = nn.BatchNorm2d(64)\n",
        "\n",
        "    # Block 2: 64 -> 128 channels.\n",
        "    self.conv2 = nn.Conv2d(64, 128, kernel_size=3,padding=1)\n",
        "    self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3,padding=0)         \n",
        "    nn.init.kaiming_normal_(self.conv2.weight)\n",
        "    nn.init.kaiming_normal_(self.conv2_2.weight)\n",
        "    self.conv2_bn = nn.BatchNorm2d(128)\n",
        "\n",
        "    # Block 3: 128 -> 256 channels.\n",
        "    self.conv3 = nn.Conv2d(128, 256, kernel_size=3,padding=1)\n",
        "    self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3,padding=1)\n",
        "    self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3,padding=0)          \n",
        "    nn.init.kaiming_normal_(self.conv3.weight)\n",
        "    nn.init.kaiming_normal_(self.conv3_2.weight)\n",
        "    nn.init.kaiming_normal_(self.conv3_3.weight)\n",
        "    self.conv3_bn = nn.BatchNorm2d(256)\n",
        "\n",
        "    # Block 4: 256 -> 512 channels.\n",
        "    self.conv4 = nn.Conv2d(256, 512, kernel_size=3,padding=1)\n",
        "    self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3,padding=1)\n",
        "    self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3,padding=0)          \n",
        "    nn.init.kaiming_normal_(self.conv4.weight)\n",
        "    nn.init.kaiming_normal_(self.conv4_2.weight)\n",
        "    nn.init.kaiming_normal_(self.conv4_3.weight)\n",
        "    self.conv4_bn = nn.BatchNorm2d(512)\n",
        "\n",
        "    # Single pooling module reused after every block.\n",
        "    self.pool = nn.MaxPool2d(kernel_size=3, stride=2)\n",
        "    #output size before conv layer: 1*1*512\n",
        "    \n",
        "    # Classification head: 512 -> 512 -> 20 units.\n",
        "    self.fc1 = nn.Linear(512 * 1 * 1, 512)\n",
        "    self.fc1_bn = nn.BatchNorm1d(512)\n",
        "    self.fc2 = nn.Linear(512, 512)\n",
        "    self.fc2_bn = nn.BatchNorm1d(512)\n",
        "    self.fc3 = nn.Linear(512, 20)\n",
        "  \n",
        "  def forward(self, x):\n",
        "    \"\"\"Classify an image batch.\n",
        "\n",
        "    Assumes x is (N, 1, 64, 64) (the training loop permutes the data to\n",
        "    channels-first). Returns an (N, 20) tensor of probabilities.\n",
        "\n",
        "    NOTE(review): the softmax spans all 20 units jointly (dim=1) rather\n",
        "    than two independent 10-way heads — confirm this is intended before\n",
        "    pairing the output with BCELoss in train_class().\n",
        "    \"\"\"\n",
        "\n",
        "    x = F.relu(self.conv1(x))\n",
        "    x = F.relu(self.conv1_bn(self.conv1_2(x)))\n",
        "    x = self.pool(x)\n",
        "\n",
        "    x = F.relu(self.conv2(x))\n",
        "    x = F.relu(self.conv2_bn(self.conv2_2(x)))\n",
        "    x = self.pool(x)\n",
        "\n",
        "    x = F.relu(self.conv3(x))\n",
        "    x = F.relu(self.conv3_2(x))\n",
        "    x = F.relu(self.conv3_bn(self.conv3_3(x)))\n",
        "    x = self.pool(x)\n",
        "\n",
        "    x = F.relu(self.conv4(x))\n",
        "    x = F.relu(self.conv4_2(x))\n",
        "    x = F.relu(self.conv4_bn(self.conv4_3(x)))\n",
        "    x = self.pool(x)\n",
        "\n",
        "    # Flatten (N, 512, 1, 1) for the fully-connected head.\n",
        "    x = x.view(-1, 512 * 1 * 1) \n",
        "    x = F.relu(self.fc1_bn(self.fc1(x)))\n",
        "    x = F.relu(self.fc2_bn(self.fc2(x)))\n",
        "    x = self.fc3(x)\n",
        "    x = F.softmax(x,dim=1)\n",
        "\n",
        "    return x\n",
        "        "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vXga5OOYa9Xx",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def train_boxes(epoch):\n",
        "    \"\"\"Run one training epoch of the box-regression network.\n",
        "\n",
        "    Args:\n",
        "        epoch: 1-based epoch number, used only for progress logging.\n",
        "\n",
        "    Side effects: updates the global `box_net` in place and saves its\n",
        "    state_dict to Drive at the end of the epoch.\n",
        "    \"\"\"\n",
        "    box_net.train()\n",
        "\n",
        "    # NOTE(review): load_data() re-reads the whole dataset from Drive on\n",
        "    # every epoch; hoisting it out of the epoch loop would be much faster.\n",
        "    _, _, bbox_loader, _ = load_data()\n",
        "\n",
        "    train_losses = []\n",
        "    train_counter = []\n",
        "\n",
        "    print('training boxes...')\n",
        "\n",
        "    # Loss modules must be instantiated before calling them.\n",
        "    MSE_loss = nn.MSELoss()\n",
        "    L1_loss = nn.L1Loss()  # alternative loss, more robust to outlier boxes\n",
        "\n",
        "    # NOTE(review): recreating Adam every epoch resets its moment\n",
        "    # estimates; consider constructing it once outside the epoch loop.\n",
        "    optimizer = torch.optim.Adam(box_net.parameters(),\n",
        "                                 lr=learning_rate, weight_decay=weight_decay)\n",
        "\n",
        "    for batch_idx, (data, target) in enumerate(bbox_loader):\n",
        "\n",
        "      # (N, 64, 64, 1) -> (N, 1, 64, 64): channels-first for Conv2d.\n",
        "      data = data.to(device).permute(0, 3, 1, 2)\n",
        "      # Targets are already float32 tensors; just flatten to (N, 8) and\n",
        "      # move to the device (the old torch.FloatTensor re-wrap was redundant).\n",
        "      target = target.view(-1, 8).to(device)\n",
        "\n",
        "      optimizer.zero_grad()\n",
        "      output = box_net(data)\n",
        "\n",
        "      loss = MSE_loss(output, target)\n",
        "      #loss = L1_loss(output, target)\n",
        "      loss.backward()\n",
        "      optimizer.step()\n",
        "\n",
        "      if batch_idx % log_interval == 0:\n",
        "        print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
        "          epoch, batch_idx * len(data), len(bbox_loader.dataset),\n",
        "          100. * batch_idx / len(bbox_loader), loss.item()))\n",
        "        train_losses.append(loss.item())\n",
        "        # Fixed: previously used a hard-coded 64 instead of the batch size.\n",
        "        train_counter.append(\n",
        "          (batch_idx * batch_size) + ((epoch - 1) * len(bbox_loader.dataset)))\n",
        "\n",
        "    PATH = '/content/drive/My Drive/Colab Notebooks/a10/box_net.pth'\n",
        "    torch.save(box_net.state_dict(), PATH)\n",
        "    print('save model..')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "OqIbyPDMWE8h",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def train_class(epoch):\n",
        "    \"\"\"Run one training epoch of the digit-classification network.\n",
        "\n",
        "    Args:\n",
        "        epoch: 1-based epoch number, used only for progress logging.\n",
        "\n",
        "    Side effects: updates the global `cla_net` in place and saves its\n",
        "    state_dict to Drive once at the end of the epoch.\n",
        "    \"\"\"\n",
        "    cla_net.train()\n",
        "    # NOTE(review): reloading the dataset from Drive every epoch is slow;\n",
        "    # consider hoisting load_data() out of the epoch loop.\n",
        "    train_loader, _, _, _ = load_data()\n",
        "\n",
        "    train_losses = []\n",
        "    train_counter = []\n",
        "\n",
        "    print('training classes ...')\n",
        "\n",
        "    criterion = nn.BCELoss()\n",
        "    optimizer = torch.optim.Adam(cla_net.parameters(),\n",
        "                                 lr=learning_rate, weight_decay=weight_decay)\n",
        "\n",
        "    for batch_idx, (data, target) in enumerate(train_loader):\n",
        "\n",
        "      # (N, 64, 64, 1) -> (N, 1, 64, 64): channels-first for Conv2d.\n",
        "      data = data.to(device).permute(0, 3, 1, 2)\n",
        "      # Assumes target holds 2 digit labels per image: one-hot gives\n",
        "      # (N, 2, 10), flattened to (N, 20) to match cla_net's 20-unit output\n",
        "      # (BCELoss requires input and target to have the same shape).\n",
        "      # TODO confirm the per-position interpretation of the 20 outputs.\n",
        "      target = F.one_hot(target.long(), num_classes=10).to(device)\n",
        "      target = target.float().view(target.size(0), -1)\n",
        "\n",
        "      optimizer.zero_grad()\n",
        "      output = cla_net(data)\n",
        "      loss = criterion(output, target)\n",
        "      loss.backward()\n",
        "      optimizer.step()\n",
        "\n",
        "      if batch_idx % log_interval == 0:\n",
        "        print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
        "          epoch, batch_idx * len(data), len(train_loader.dataset),\n",
        "          100. * batch_idx / len(train_loader), loss.item()))\n",
        "        train_losses.append(loss.item())\n",
        "        # Fixed: previously used a hard-coded 256 instead of the batch size.\n",
        "        train_counter.append(\n",
        "          (batch_idx * batch_size) + ((epoch - 1) * len(train_loader.dataset)))\n",
        "\n",
        "    # Fixed: the checkpoint was previously saved inside the batch loop,\n",
        "    # writing to Drive after every single batch.\n",
        "    PATH = '/content/drive/My Drive/Colab Notebooks/a10/cla_net.pth'\n",
        "    torch.save(cla_net.state_dict(), PATH)\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ofeLCfHWHUd5",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Z81rjeAQvj8T",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def validation_cla():\n",
        "  \"\"\"Evaluate cla_net on the validation split and print accuracy.\n",
        "\n",
        "  Interprets the (N, 20) network output as two 10-way digit heads,\n",
        "  takes each head's argmax, sorts the pair (order-independent match)\n",
        "  and counts element-wise agreement with the labels.\n",
        "  \"\"\"\n",
        "  cla_net.eval()\n",
        "\n",
        "  validation_loss = 0\n",
        "  correct = 0\n",
        "  _, validation_loader, _, _ = load_data()\n",
        "\n",
        "  validation_losses = []\n",
        "  validation_counter = [i * len(validation_loader.dataset) for i in range(n_epochs + 1)]\n",
        "\n",
        "  # NOTE(review): criterion is currently unused — target is not one-hot\n",
        "  # encoded here, so validation_loss stays 0 and the printed average\n",
        "  # loss is not meaningful yet.\n",
        "  criterion = nn.BCELoss()\n",
        "\n",
        "  with torch.no_grad():\n",
        "    for batch_idx, (data, target) in enumerate(validation_loader):\n",
        "      # (N, 64, 64, 1) -> (N, 1, 64, 64): channels-first for Conv2d.\n",
        "      data = data.to(device).permute(0, 3, 1, 2)\n",
        "\n",
        "      output = cla_net(data)\n",
        "      # (N, 20) -> (N, 2, 10): two 10-way digit predictions per image.\n",
        "      output = torch.reshape(output, [batch_size, 2, 10]).cpu()\n",
        "\n",
        "      # Argmax each head, then sort ascending so the comparison does not\n",
        "      # depend on the order in which the two digits are predicted.\n",
        "      pred, _ = output.argmax(dim=2).sort(dim=1)\n",
        "\n",
        "      correct += pred.eq(target.data.view_as(pred)).sum()\n",
        "\n",
        "  validation_loss /= len(validation_loader.dataset)\n",
        "  validation_losses.append(validation_loss)\n",
        "\n",
        "  # Each image carries 2 labels, so accuracy is measured against\n",
        "  # 2 * dataset size. (Fixed: a stray `* 2` used to duplicate the\n",
        "  # printed string, and the old denominator overcounted accuracy.)\n",
        "  n_labels = 2 * len(validation_loader.dataset)\n",
        "  print('\\nValidation set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n",
        "    validation_loss, correct, n_labels,\n",
        "    100. * correct / n_labels))\n",
        "\n",
        "  acc = correct.item() / n_labels\n",
        "  print('acc on valid is {}'.format(acc))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qZyHRpk5lmFu",
        "colab_type": "code",
        "outputId": "53aec3f9-a28c-4a78-f30b-d45d660d76e4",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Instantiate both networks on the selected device.\n",
        "box_net = Box_Net().to(device)\n",
        "cla_net = Cla_Net().to(device)\n",
        "\n",
        "# Train the box-regression network; classification training and\n",
        "# validation are currently disabled.\n",
        "for epoch in range(1, n_epochs + 1):\n",
        "  train_boxes(epoch)\n",
        "  # train_class(epoch)\n",
        "  # validation_cla()\n",
        "\n",
        "torch.cuda.empty_cache()"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "training boxes...\n",
            "Train Epoch: 1 [0/55000 (0%)]\tLoss: 1352.434814\n",
            "Train Epoch: 1 [10000/55000 (18%)]\tLoss: 517.432007\n",
            "Train Epoch: 1 [20000/55000 (36%)]\tLoss: 161.497131\n",
            "Train Epoch: 1 [30000/55000 (55%)]\tLoss: 88.585800\n",
            "Train Epoch: 1 [40000/55000 (73%)]\tLoss: 64.589088\n",
            "Train Epoch: 1 [50000/55000 (91%)]\tLoss: 70.177467\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 2 [0/55000 (0%)]\tLoss: 63.674850\n",
            "Train Epoch: 2 [10000/55000 (18%)]\tLoss: 62.703453\n",
            "Train Epoch: 2 [20000/55000 (36%)]\tLoss: 55.181507\n",
            "Train Epoch: 2 [30000/55000 (55%)]\tLoss: 56.742870\n",
            "Train Epoch: 2 [40000/55000 (73%)]\tLoss: 66.909531\n",
            "Train Epoch: 2 [50000/55000 (91%)]\tLoss: 59.635063\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 3 [0/55000 (0%)]\tLoss: 57.983433\n",
            "Train Epoch: 3 [10000/55000 (18%)]\tLoss: 55.136471\n",
            "Train Epoch: 3 [20000/55000 (36%)]\tLoss: 55.944229\n",
            "Train Epoch: 3 [30000/55000 (55%)]\tLoss: 63.355202\n",
            "Train Epoch: 3 [40000/55000 (73%)]\tLoss: 61.537544\n",
            "Train Epoch: 3 [50000/55000 (91%)]\tLoss: 59.109550\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 4 [0/55000 (0%)]\tLoss: 61.036343\n",
            "Train Epoch: 4 [10000/55000 (18%)]\tLoss: 60.148754\n",
            "Train Epoch: 4 [20000/55000 (36%)]\tLoss: 59.587234\n",
            "Train Epoch: 4 [30000/55000 (55%)]\tLoss: 57.336338\n",
            "Train Epoch: 4 [40000/55000 (73%)]\tLoss: 63.246902\n",
            "Train Epoch: 4 [50000/55000 (91%)]\tLoss: 59.052578\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 5 [0/55000 (0%)]\tLoss: 47.044437\n",
            "Train Epoch: 5 [10000/55000 (18%)]\tLoss: 54.235477\n",
            "Train Epoch: 5 [20000/55000 (36%)]\tLoss: 56.869431\n",
            "Train Epoch: 5 [30000/55000 (55%)]\tLoss: 55.495255\n",
            "Train Epoch: 5 [40000/55000 (73%)]\tLoss: 48.055000\n",
            "Train Epoch: 5 [50000/55000 (91%)]\tLoss: 59.803261\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 6 [0/55000 (0%)]\tLoss: 42.360649\n",
            "Train Epoch: 6 [10000/55000 (18%)]\tLoss: 57.497646\n",
            "Train Epoch: 6 [20000/55000 (36%)]\tLoss: 52.754208\n",
            "Train Epoch: 6 [30000/55000 (55%)]\tLoss: 65.218079\n",
            "Train Epoch: 6 [40000/55000 (73%)]\tLoss: 55.641132\n",
            "Train Epoch: 6 [50000/55000 (91%)]\tLoss: 60.836929\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 7 [0/55000 (0%)]\tLoss: 54.714287\n",
            "Train Epoch: 7 [10000/55000 (18%)]\tLoss: 48.527187\n",
            "Train Epoch: 7 [20000/55000 (36%)]\tLoss: 45.232834\n",
            "Train Epoch: 7 [30000/55000 (55%)]\tLoss: 39.780533\n",
            "Train Epoch: 7 [40000/55000 (73%)]\tLoss: 21.646215\n",
            "Train Epoch: 7 [50000/55000 (91%)]\tLoss: 14.051981\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 8 [0/55000 (0%)]\tLoss: 17.101881\n",
            "Train Epoch: 8 [10000/55000 (18%)]\tLoss: 12.869096\n",
            "Train Epoch: 8 [20000/55000 (36%)]\tLoss: 8.102710\n",
            "Train Epoch: 8 [30000/55000 (55%)]\tLoss: 8.188937\n",
            "Train Epoch: 8 [40000/55000 (73%)]\tLoss: 9.788830\n",
            "Train Epoch: 8 [50000/55000 (91%)]\tLoss: 12.506625\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 9 [0/55000 (0%)]\tLoss: 7.221741\n",
            "Train Epoch: 9 [10000/55000 (18%)]\tLoss: 18.543781\n",
            "Train Epoch: 9 [20000/55000 (36%)]\tLoss: 16.398134\n",
            "Train Epoch: 9 [30000/55000 (55%)]\tLoss: 4.541670\n",
            "Train Epoch: 9 [40000/55000 (73%)]\tLoss: 7.532030\n",
            "Train Epoch: 9 [50000/55000 (91%)]\tLoss: 18.914473\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 10 [0/55000 (0%)]\tLoss: 13.690880\n",
            "Train Epoch: 10 [10000/55000 (18%)]\tLoss: 7.477360\n",
            "Train Epoch: 10 [20000/55000 (36%)]\tLoss: 19.142078\n",
            "Train Epoch: 10 [30000/55000 (55%)]\tLoss: 11.413358\n",
            "Train Epoch: 10 [40000/55000 (73%)]\tLoss: 8.989157\n",
            "Train Epoch: 10 [50000/55000 (91%)]\tLoss: 16.717247\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 11 [0/55000 (0%)]\tLoss: 10.863962\n",
            "Train Epoch: 11 [10000/55000 (18%)]\tLoss: 13.125974\n",
            "Train Epoch: 11 [20000/55000 (36%)]\tLoss: 7.241721\n",
            "Train Epoch: 11 [30000/55000 (55%)]\tLoss: 5.869053\n",
            "Train Epoch: 11 [40000/55000 (73%)]\tLoss: 9.458596\n",
            "Train Epoch: 11 [50000/55000 (91%)]\tLoss: 9.784156\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 12 [0/55000 (0%)]\tLoss: 10.313489\n",
            "Train Epoch: 12 [10000/55000 (18%)]\tLoss: 5.619519\n",
            "Train Epoch: 12 [20000/55000 (36%)]\tLoss: 16.058748\n",
            "Train Epoch: 12 [30000/55000 (55%)]\tLoss: 6.053122\n",
            "Train Epoch: 12 [40000/55000 (73%)]\tLoss: 6.991262\n",
            "Train Epoch: 12 [50000/55000 (91%)]\tLoss: 7.392748\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 13 [0/55000 (0%)]\tLoss: 4.728597\n",
            "Train Epoch: 13 [10000/55000 (18%)]\tLoss: 6.094201\n",
            "Train Epoch: 13 [20000/55000 (36%)]\tLoss: 8.167562\n",
            "Train Epoch: 13 [30000/55000 (55%)]\tLoss: 14.593737\n",
            "Train Epoch: 13 [40000/55000 (73%)]\tLoss: 11.206444\n",
            "Train Epoch: 13 [50000/55000 (91%)]\tLoss: 14.229609\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 14 [0/55000 (0%)]\tLoss: 10.148397\n",
            "Train Epoch: 14 [10000/55000 (18%)]\tLoss: 6.915135\n",
            "Train Epoch: 14 [20000/55000 (36%)]\tLoss: 11.388708\n",
            "Train Epoch: 14 [30000/55000 (55%)]\tLoss: 7.495542\n",
            "Train Epoch: 14 [40000/55000 (73%)]\tLoss: 7.522508\n",
            "Train Epoch: 14 [50000/55000 (91%)]\tLoss: 11.130279\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 15 [0/55000 (0%)]\tLoss: 10.468632\n",
            "Train Epoch: 15 [10000/55000 (18%)]\tLoss: 4.879228\n",
            "Train Epoch: 15 [20000/55000 (36%)]\tLoss: 9.038970\n",
            "Train Epoch: 15 [30000/55000 (55%)]\tLoss: 5.333192\n",
            "Train Epoch: 15 [40000/55000 (73%)]\tLoss: 5.730783\n",
            "Train Epoch: 15 [50000/55000 (91%)]\tLoss: 8.190876\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 16 [0/55000 (0%)]\tLoss: 14.047759\n",
            "Train Epoch: 16 [10000/55000 (18%)]\tLoss: 9.629657\n",
            "Train Epoch: 16 [20000/55000 (36%)]\tLoss: 10.315331\n",
            "Train Epoch: 16 [30000/55000 (55%)]\tLoss: 12.515450\n",
            "Train Epoch: 16 [40000/55000 (73%)]\tLoss: 4.945093\n",
            "Train Epoch: 16 [50000/55000 (91%)]\tLoss: 10.214669\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 17 [0/55000 (0%)]\tLoss: 6.254375\n",
            "Train Epoch: 17 [10000/55000 (18%)]\tLoss: 5.272164\n",
            "Train Epoch: 17 [20000/55000 (36%)]\tLoss: 9.229978\n",
            "Train Epoch: 17 [30000/55000 (55%)]\tLoss: 6.245765\n",
            "Train Epoch: 17 [40000/55000 (73%)]\tLoss: 11.011099\n",
            "Train Epoch: 17 [50000/55000 (91%)]\tLoss: 7.523198\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 18 [0/55000 (0%)]\tLoss: 3.516259\n",
            "Train Epoch: 18 [10000/55000 (18%)]\tLoss: 8.685233\n",
            "Train Epoch: 18 [20000/55000 (36%)]\tLoss: 4.423927\n",
            "Train Epoch: 18 [30000/55000 (55%)]\tLoss: 11.468952\n",
            "Train Epoch: 18 [40000/55000 (73%)]\tLoss: 8.306841\n",
            "Train Epoch: 18 [50000/55000 (91%)]\tLoss: 11.650332\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 19 [0/55000 (0%)]\tLoss: 4.281738\n",
            "Train Epoch: 19 [10000/55000 (18%)]\tLoss: 7.607309\n",
            "Train Epoch: 19 [20000/55000 (36%)]\tLoss: 5.380085\n",
            "Train Epoch: 19 [30000/55000 (55%)]\tLoss: 6.468063\n",
            "Train Epoch: 19 [40000/55000 (73%)]\tLoss: 6.266240\n",
            "Train Epoch: 19 [50000/55000 (91%)]\tLoss: 6.103017\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 20 [0/55000 (0%)]\tLoss: 8.988982\n",
            "Train Epoch: 20 [10000/55000 (18%)]\tLoss: 5.197440\n",
            "Train Epoch: 20 [20000/55000 (36%)]\tLoss: 10.562658\n",
            "Train Epoch: 20 [30000/55000 (55%)]\tLoss: 10.330112\n",
            "Train Epoch: 20 [40000/55000 (73%)]\tLoss: 13.406013\n",
            "Train Epoch: 20 [50000/55000 (91%)]\tLoss: 12.687696\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 21 [0/55000 (0%)]\tLoss: 4.885664\n",
            "Train Epoch: 21 [10000/55000 (18%)]\tLoss: 4.852671\n",
            "Train Epoch: 21 [20000/55000 (36%)]\tLoss: 5.572272\n",
            "Train Epoch: 21 [30000/55000 (55%)]\tLoss: 4.867653\n",
            "Train Epoch: 21 [40000/55000 (73%)]\tLoss: 7.494067\n",
            "Train Epoch: 21 [50000/55000 (91%)]\tLoss: 4.045190\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 22 [0/55000 (0%)]\tLoss: 7.809683\n",
            "Train Epoch: 22 [10000/55000 (18%)]\tLoss: 8.674470\n",
            "Train Epoch: 22 [20000/55000 (36%)]\tLoss: 9.131013\n",
            "Train Epoch: 22 [30000/55000 (55%)]\tLoss: 11.163351\n",
            "Train Epoch: 22 [40000/55000 (73%)]\tLoss: 4.994873\n",
            "Train Epoch: 22 [50000/55000 (91%)]\tLoss: 4.001638\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 23 [0/55000 (0%)]\tLoss: 9.089536\n",
            "Train Epoch: 23 [10000/55000 (18%)]\tLoss: 5.464524\n",
            "Train Epoch: 23 [20000/55000 (36%)]\tLoss: 5.442276\n",
            "Train Epoch: 23 [30000/55000 (55%)]\tLoss: 9.425512\n",
            "Train Epoch: 23 [40000/55000 (73%)]\tLoss: 4.831587\n",
            "Train Epoch: 23 [50000/55000 (91%)]\tLoss: 8.104157\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 24 [0/55000 (0%)]\tLoss: 5.897291\n",
            "Train Epoch: 24 [10000/55000 (18%)]\tLoss: 5.106515\n",
            "Train Epoch: 24 [20000/55000 (36%)]\tLoss: 3.950117\n",
            "Train Epoch: 24 [30000/55000 (55%)]\tLoss: 5.147250\n",
            "Train Epoch: 24 [40000/55000 (73%)]\tLoss: 9.109259\n",
            "Train Epoch: 24 [50000/55000 (91%)]\tLoss: 5.254081\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 25 [0/55000 (0%)]\tLoss: 6.109415\n",
            "Train Epoch: 25 [10000/55000 (18%)]\tLoss: 5.371425\n",
            "Train Epoch: 25 [20000/55000 (36%)]\tLoss: 5.389978\n",
            "Train Epoch: 25 [30000/55000 (55%)]\tLoss: 2.637984\n",
            "Train Epoch: 25 [40000/55000 (73%)]\tLoss: 8.463955\n",
            "Train Epoch: 25 [50000/55000 (91%)]\tLoss: 2.539505\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 26 [0/55000 (0%)]\tLoss: 3.212882\n",
            "Train Epoch: 26 [10000/55000 (18%)]\tLoss: 3.674984\n",
            "Train Epoch: 26 [20000/55000 (36%)]\tLoss: 5.182404\n",
            "Train Epoch: 26 [30000/55000 (55%)]\tLoss: 8.309785\n",
            "Train Epoch: 26 [40000/55000 (73%)]\tLoss: 5.469172\n",
            "Train Epoch: 26 [50000/55000 (91%)]\tLoss: 9.126511\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 27 [0/55000 (0%)]\tLoss: 4.656423\n",
            "Train Epoch: 27 [10000/55000 (18%)]\tLoss: 3.634447\n",
            "Train Epoch: 27 [20000/55000 (36%)]\tLoss: 11.667794\n",
            "Train Epoch: 27 [30000/55000 (55%)]\tLoss: 3.143356\n",
            "Train Epoch: 27 [40000/55000 (73%)]\tLoss: 5.563568\n",
            "Train Epoch: 27 [50000/55000 (91%)]\tLoss: 7.881035\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 28 [0/55000 (0%)]\tLoss: 3.877627\n",
            "Train Epoch: 28 [10000/55000 (18%)]\tLoss: 7.906389\n",
            "Train Epoch: 28 [20000/55000 (36%)]\tLoss: 3.741353\n",
            "Train Epoch: 28 [30000/55000 (55%)]\tLoss: 3.947837\n",
            "Train Epoch: 28 [40000/55000 (73%)]\tLoss: 2.114623\n",
            "Train Epoch: 28 [50000/55000 (91%)]\tLoss: 7.817362\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 29 [0/55000 (0%)]\tLoss: 6.342712\n",
            "Train Epoch: 29 [10000/55000 (18%)]\tLoss: 3.857242\n",
            "Train Epoch: 29 [20000/55000 (36%)]\tLoss: 5.684917\n",
            "Train Epoch: 29 [30000/55000 (55%)]\tLoss: 1.781933\n",
            "Train Epoch: 29 [40000/55000 (73%)]\tLoss: 7.691445\n",
            "Train Epoch: 29 [50000/55000 (91%)]\tLoss: 4.117341\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 30 [0/55000 (0%)]\tLoss: 2.775093\n",
            "Train Epoch: 30 [10000/55000 (18%)]\tLoss: 3.286467\n",
            "Train Epoch: 30 [20000/55000 (36%)]\tLoss: 3.244564\n",
            "Train Epoch: 30 [30000/55000 (55%)]\tLoss: 2.614017\n",
            "Train Epoch: 30 [40000/55000 (73%)]\tLoss: 5.024408\n",
            "Train Epoch: 30 [50000/55000 (91%)]\tLoss: 3.063652\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 31 [0/55000 (0%)]\tLoss: 2.390827\n",
            "Train Epoch: 31 [10000/55000 (18%)]\tLoss: 3.008299\n",
            "Train Epoch: 31 [20000/55000 (36%)]\tLoss: 3.944721\n",
            "Train Epoch: 31 [30000/55000 (55%)]\tLoss: 2.578354\n",
            "Train Epoch: 31 [40000/55000 (73%)]\tLoss: 5.008117\n",
            "Train Epoch: 31 [50000/55000 (91%)]\tLoss: 2.752846\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 32 [0/55000 (0%)]\tLoss: 2.149054\n",
            "Train Epoch: 32 [10000/55000 (18%)]\tLoss: 4.016217\n",
            "Train Epoch: 32 [20000/55000 (36%)]\tLoss: 6.291288\n",
            "Train Epoch: 32 [30000/55000 (55%)]\tLoss: 2.669674\n",
            "Train Epoch: 32 [40000/55000 (73%)]\tLoss: 1.581723\n",
            "Train Epoch: 32 [50000/55000 (91%)]\tLoss: 3.978925\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 33 [0/55000 (0%)]\tLoss: 2.185244\n",
            "Train Epoch: 33 [10000/55000 (18%)]\tLoss: 6.386498\n",
            "Train Epoch: 33 [20000/55000 (36%)]\tLoss: 3.440956\n",
            "Train Epoch: 33 [30000/55000 (55%)]\tLoss: 7.037749\n",
            "Train Epoch: 33 [40000/55000 (73%)]\tLoss: 4.563920\n",
            "Train Epoch: 33 [50000/55000 (91%)]\tLoss: 2.676091\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 34 [0/55000 (0%)]\tLoss: 6.935590\n",
            "Train Epoch: 34 [10000/55000 (18%)]\tLoss: 1.978897\n",
            "Train Epoch: 34 [20000/55000 (36%)]\tLoss: 2.424637\n",
            "Train Epoch: 34 [30000/55000 (55%)]\tLoss: 3.930061\n",
            "Train Epoch: 34 [40000/55000 (73%)]\tLoss: 6.227768\n",
            "Train Epoch: 34 [50000/55000 (91%)]\tLoss: 6.095601\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 35 [0/55000 (0%)]\tLoss: 4.178442\n",
            "Train Epoch: 35 [10000/55000 (18%)]\tLoss: 2.541761\n",
            "Train Epoch: 35 [20000/55000 (36%)]\tLoss: 4.564445\n",
            "Train Epoch: 35 [30000/55000 (55%)]\tLoss: 4.509076\n",
            "Train Epoch: 35 [40000/55000 (73%)]\tLoss: 2.141238\n",
            "Train Epoch: 35 [50000/55000 (91%)]\tLoss: 4.704729\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 36 [0/55000 (0%)]\tLoss: 4.203966\n",
            "Train Epoch: 36 [10000/55000 (18%)]\tLoss: 3.191875\n",
            "Train Epoch: 36 [20000/55000 (36%)]\tLoss: 3.502646\n",
            "Train Epoch: 36 [30000/55000 (55%)]\tLoss: 1.709111\n",
            "Train Epoch: 36 [40000/55000 (73%)]\tLoss: 4.025602\n",
            "Train Epoch: 36 [50000/55000 (91%)]\tLoss: 5.794754\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 37 [0/55000 (0%)]\tLoss: 5.278126\n",
            "Train Epoch: 37 [10000/55000 (18%)]\tLoss: 2.587144\n",
            "Train Epoch: 37 [20000/55000 (36%)]\tLoss: 2.027472\n",
            "Train Epoch: 37 [30000/55000 (55%)]\tLoss: 2.770831\n",
            "Train Epoch: 37 [40000/55000 (73%)]\tLoss: 6.715531\n",
            "Train Epoch: 37 [50000/55000 (91%)]\tLoss: 1.786744\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 38 [0/55000 (0%)]\tLoss: 1.818161\n",
            "Train Epoch: 38 [10000/55000 (18%)]\tLoss: 3.976703\n",
            "Train Epoch: 38 [20000/55000 (36%)]\tLoss: 2.550744\n",
            "Train Epoch: 38 [30000/55000 (55%)]\tLoss: 1.969680\n",
            "Train Epoch: 38 [40000/55000 (73%)]\tLoss: 6.638417\n",
            "Train Epoch: 38 [50000/55000 (91%)]\tLoss: 2.515999\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 39 [0/55000 (0%)]\tLoss: 1.233431\n",
            "Train Epoch: 39 [10000/55000 (18%)]\tLoss: 6.960493\n",
            "Train Epoch: 39 [20000/55000 (36%)]\tLoss: 5.144439\n",
            "Train Epoch: 39 [30000/55000 (55%)]\tLoss: 5.777091\n",
            "Train Epoch: 39 [40000/55000 (73%)]\tLoss: 3.396202\n",
            "Train Epoch: 39 [50000/55000 (91%)]\tLoss: 3.148065\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 40 [0/55000 (0%)]\tLoss: 1.659418\n",
            "Train Epoch: 40 [10000/55000 (18%)]\tLoss: 1.030251\n",
            "Train Epoch: 40 [20000/55000 (36%)]\tLoss: 3.917519\n",
            "Train Epoch: 40 [30000/55000 (55%)]\tLoss: 3.989598\n",
            "Train Epoch: 40 [40000/55000 (73%)]\tLoss: 1.989144\n",
            "Train Epoch: 40 [50000/55000 (91%)]\tLoss: 2.268819\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 41 [0/55000 (0%)]\tLoss: 3.280251\n",
            "Train Epoch: 41 [10000/55000 (18%)]\tLoss: 2.742455\n",
            "Train Epoch: 41 [20000/55000 (36%)]\tLoss: 2.396212\n",
            "Train Epoch: 41 [30000/55000 (55%)]\tLoss: 2.020694\n",
            "Train Epoch: 41 [40000/55000 (73%)]\tLoss: 3.719251\n",
            "Train Epoch: 41 [50000/55000 (91%)]\tLoss: 1.775120\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 42 [0/55000 (0%)]\tLoss: 3.888575\n",
            "Train Epoch: 42 [10000/55000 (18%)]\tLoss: 1.801254\n",
            "Train Epoch: 42 [20000/55000 (36%)]\tLoss: 2.059603\n",
            "Train Epoch: 42 [30000/55000 (55%)]\tLoss: 4.274700\n",
            "Train Epoch: 42 [40000/55000 (73%)]\tLoss: 1.111032\n",
            "Train Epoch: 42 [50000/55000 (91%)]\tLoss: 2.535869\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 43 [0/55000 (0%)]\tLoss: 2.075499\n",
            "Train Epoch: 43 [10000/55000 (18%)]\tLoss: 1.743045\n",
            "Train Epoch: 43 [20000/55000 (36%)]\tLoss: 3.989687\n",
            "Train Epoch: 43 [30000/55000 (55%)]\tLoss: 5.989792\n",
            "Train Epoch: 43 [40000/55000 (73%)]\tLoss: 3.840167\n",
            "Train Epoch: 43 [50000/55000 (91%)]\tLoss: 2.426607\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 44 [0/55000 (0%)]\tLoss: 5.052101\n",
            "Train Epoch: 44 [10000/55000 (18%)]\tLoss: 1.093453\n",
            "Train Epoch: 44 [20000/55000 (36%)]\tLoss: 1.577238\n",
            "Train Epoch: 44 [30000/55000 (55%)]\tLoss: 0.946162\n",
            "Train Epoch: 44 [40000/55000 (73%)]\tLoss: 2.015166\n",
            "Train Epoch: 44 [50000/55000 (91%)]\tLoss: 2.280320\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 45 [0/55000 (0%)]\tLoss: 1.555405\n",
            "Train Epoch: 45 [10000/55000 (18%)]\tLoss: 1.891613\n",
            "Train Epoch: 45 [20000/55000 (36%)]\tLoss: 7.754718\n",
            "Train Epoch: 45 [30000/55000 (55%)]\tLoss: 3.325209\n",
            "Train Epoch: 45 [40000/55000 (73%)]\tLoss: 1.364579\n",
            "Train Epoch: 45 [50000/55000 (91%)]\tLoss: 3.340002\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 46 [0/55000 (0%)]\tLoss: 1.419183\n",
            "Train Epoch: 46 [10000/55000 (18%)]\tLoss: 2.115366\n",
            "Train Epoch: 46 [20000/55000 (36%)]\tLoss: 3.474750\n",
            "Train Epoch: 46 [30000/55000 (55%)]\tLoss: 7.418877\n",
            "Train Epoch: 46 [40000/55000 (73%)]\tLoss: 1.821656\n",
            "Train Epoch: 46 [50000/55000 (91%)]\tLoss: 1.674360\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 47 [0/55000 (0%)]\tLoss: 3.318635\n",
            "Train Epoch: 47 [10000/55000 (18%)]\tLoss: 4.511138\n",
            "Train Epoch: 47 [20000/55000 (36%)]\tLoss: 2.976828\n",
            "Train Epoch: 47 [30000/55000 (55%)]\tLoss: 3.345858\n",
            "Train Epoch: 47 [40000/55000 (73%)]\tLoss: 2.889980\n",
            "Train Epoch: 47 [50000/55000 (91%)]\tLoss: 1.993805\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 48 [0/55000 (0%)]\tLoss: 2.670462\n",
            "Train Epoch: 48 [10000/55000 (18%)]\tLoss: 2.635228\n",
            "Train Epoch: 48 [20000/55000 (36%)]\tLoss: 1.458504\n",
            "Train Epoch: 48 [30000/55000 (55%)]\tLoss: 1.843966\n",
            "Train Epoch: 48 [40000/55000 (73%)]\tLoss: 3.028576\n",
            "Train Epoch: 48 [50000/55000 (91%)]\tLoss: 4.822351\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 49 [0/55000 (0%)]\tLoss: 2.742731\n",
            "Train Epoch: 49 [10000/55000 (18%)]\tLoss: 2.707335\n",
            "Train Epoch: 49 [20000/55000 (36%)]\tLoss: 1.302946\n",
            "Train Epoch: 49 [30000/55000 (55%)]\tLoss: 1.542377\n",
            "Train Epoch: 49 [40000/55000 (73%)]\tLoss: 2.335139\n",
            "Train Epoch: 49 [50000/55000 (91%)]\tLoss: 2.159946\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 50 [0/55000 (0%)]\tLoss: 3.075723\n",
            "Train Epoch: 50 [10000/55000 (18%)]\tLoss: 2.691657\n",
            "Train Epoch: 50 [20000/55000 (36%)]\tLoss: 1.260967\n",
            "Train Epoch: 50 [30000/55000 (55%)]\tLoss: 1.826565\n",
            "Train Epoch: 50 [40000/55000 (73%)]\tLoss: 1.753490\n",
            "Train Epoch: 50 [50000/55000 (91%)]\tLoss: 1.650515\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 51 [0/55000 (0%)]\tLoss: 3.171292\n",
            "Train Epoch: 51 [10000/55000 (18%)]\tLoss: 1.785088\n",
            "Train Epoch: 51 [20000/55000 (36%)]\tLoss: 1.717165\n",
            "Train Epoch: 51 [30000/55000 (55%)]\tLoss: 1.073249\n",
            "Train Epoch: 51 [40000/55000 (73%)]\tLoss: 3.349214\n",
            "Train Epoch: 51 [50000/55000 (91%)]\tLoss: 4.146330\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 52 [0/55000 (0%)]\tLoss: 2.846904\n",
            "Train Epoch: 52 [10000/55000 (18%)]\tLoss: 1.941478\n",
            "Train Epoch: 52 [20000/55000 (36%)]\tLoss: 1.697231\n",
            "Train Epoch: 52 [30000/55000 (55%)]\tLoss: 3.292429\n",
            "Train Epoch: 52 [40000/55000 (73%)]\tLoss: 2.042318\n",
            "Train Epoch: 52 [50000/55000 (91%)]\tLoss: 1.472647\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 53 [0/55000 (0%)]\tLoss: 2.086702\n",
            "Train Epoch: 53 [10000/55000 (18%)]\tLoss: 2.336768\n",
            "Train Epoch: 53 [20000/55000 (36%)]\tLoss: 2.350106\n",
            "Train Epoch: 53 [30000/55000 (55%)]\tLoss: 6.800777\n",
            "Train Epoch: 53 [40000/55000 (73%)]\tLoss: 3.296649\n",
            "Train Epoch: 53 [50000/55000 (91%)]\tLoss: 1.857819\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 54 [0/55000 (0%)]\tLoss: 2.065306\n",
            "Train Epoch: 54 [10000/55000 (18%)]\tLoss: 1.429776\n",
            "Train Epoch: 54 [20000/55000 (36%)]\tLoss: 5.533529\n",
            "Train Epoch: 54 [30000/55000 (55%)]\tLoss: 1.859116\n",
            "Train Epoch: 54 [40000/55000 (73%)]\tLoss: 2.359818\n",
            "Train Epoch: 54 [50000/55000 (91%)]\tLoss: 1.366271\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 55 [0/55000 (0%)]\tLoss: 1.162923\n",
            "Train Epoch: 55 [10000/55000 (18%)]\tLoss: 1.907876\n",
            "Train Epoch: 55 [20000/55000 (36%)]\tLoss: 1.529097\n",
            "Train Epoch: 55 [30000/55000 (55%)]\tLoss: 1.013074\n",
            "Train Epoch: 55 [40000/55000 (73%)]\tLoss: 1.366094\n",
            "Train Epoch: 55 [50000/55000 (91%)]\tLoss: 1.787008\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 56 [0/55000 (0%)]\tLoss: 1.015652\n",
            "Train Epoch: 56 [10000/55000 (18%)]\tLoss: 0.977523\n",
            "Train Epoch: 56 [20000/55000 (36%)]\tLoss: 2.840654\n",
            "Train Epoch: 56 [30000/55000 (55%)]\tLoss: 2.515715\n",
            "Train Epoch: 56 [40000/55000 (73%)]\tLoss: 1.991738\n",
            "Train Epoch: 56 [50000/55000 (91%)]\tLoss: 4.527596\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 57 [0/55000 (0%)]\tLoss: 1.283000\n",
            "Train Epoch: 57 [10000/55000 (18%)]\tLoss: 1.399379\n",
            "Train Epoch: 57 [20000/55000 (36%)]\tLoss: 2.325115\n",
            "Train Epoch: 57 [30000/55000 (55%)]\tLoss: 1.914180\n",
            "Train Epoch: 57 [40000/55000 (73%)]\tLoss: 3.072495\n",
            "Train Epoch: 57 [50000/55000 (91%)]\tLoss: 1.176866\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 58 [0/55000 (0%)]\tLoss: 1.539786\n",
            "Train Epoch: 58 [10000/55000 (18%)]\tLoss: 1.698359\n",
            "Train Epoch: 58 [20000/55000 (36%)]\tLoss: 1.028057\n",
            "Train Epoch: 58 [30000/55000 (55%)]\tLoss: 5.740402\n",
            "Train Epoch: 58 [40000/55000 (73%)]\tLoss: 1.315641\n",
            "Train Epoch: 58 [50000/55000 (91%)]\tLoss: 0.953388\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 59 [0/55000 (0%)]\tLoss: 5.623213\n",
            "Train Epoch: 59 [10000/55000 (18%)]\tLoss: 3.253676\n",
            "Train Epoch: 59 [20000/55000 (36%)]\tLoss: 1.322776\n",
            "Train Epoch: 59 [30000/55000 (55%)]\tLoss: 1.771051\n",
            "Train Epoch: 59 [40000/55000 (73%)]\tLoss: 0.860088\n",
            "Train Epoch: 59 [50000/55000 (91%)]\tLoss: 1.419193\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 60 [0/55000 (0%)]\tLoss: 2.510745\n",
            "Train Epoch: 60 [10000/55000 (18%)]\tLoss: 1.317355\n",
            "Train Epoch: 60 [20000/55000 (36%)]\tLoss: 1.331371\n",
            "Train Epoch: 60 [30000/55000 (55%)]\tLoss: 1.622808\n",
            "Train Epoch: 60 [40000/55000 (73%)]\tLoss: 1.173209\n",
            "Train Epoch: 60 [50000/55000 (91%)]\tLoss: 1.517556\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 61 [0/55000 (0%)]\tLoss: 1.051565\n",
            "Train Epoch: 61 [10000/55000 (18%)]\tLoss: 1.272682\n",
            "Train Epoch: 61 [20000/55000 (36%)]\tLoss: 1.744753\n",
            "Train Epoch: 61 [30000/55000 (55%)]\tLoss: 2.725673\n",
            "Train Epoch: 61 [40000/55000 (73%)]\tLoss: 0.861846\n",
            "Train Epoch: 61 [50000/55000 (91%)]\tLoss: 0.784445\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 62 [0/55000 (0%)]\tLoss: 0.709862\n",
            "Train Epoch: 62 [10000/55000 (18%)]\tLoss: 2.542063\n",
            "Train Epoch: 62 [20000/55000 (36%)]\tLoss: 1.459871\n",
            "Train Epoch: 62 [30000/55000 (55%)]\tLoss: 1.367734\n",
            "Train Epoch: 62 [40000/55000 (73%)]\tLoss: 2.147793\n",
            "Train Epoch: 62 [50000/55000 (91%)]\tLoss: 0.900943\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 63 [0/55000 (0%)]\tLoss: 4.057599\n",
            "Train Epoch: 63 [10000/55000 (18%)]\tLoss: 1.438331\n",
            "Train Epoch: 63 [20000/55000 (36%)]\tLoss: 0.790423\n",
            "Train Epoch: 63 [30000/55000 (55%)]\tLoss: 2.925856\n",
            "Train Epoch: 63 [40000/55000 (73%)]\tLoss: 0.732905\n",
            "Train Epoch: 63 [50000/55000 (91%)]\tLoss: 6.125959\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 64 [0/55000 (0%)]\tLoss: 1.142432\n",
            "Train Epoch: 64 [10000/55000 (18%)]\tLoss: 3.024556\n",
            "Train Epoch: 64 [20000/55000 (36%)]\tLoss: 0.715933\n",
            "Train Epoch: 64 [30000/55000 (55%)]\tLoss: 1.339450\n",
            "Train Epoch: 64 [40000/55000 (73%)]\tLoss: 1.523688\n",
            "Train Epoch: 64 [50000/55000 (91%)]\tLoss: 1.759836\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 65 [0/55000 (0%)]\tLoss: 1.659754\n",
            "Train Epoch: 65 [10000/55000 (18%)]\tLoss: 0.953998\n",
            "Train Epoch: 65 [20000/55000 (36%)]\tLoss: 2.124309\n",
            "Train Epoch: 65 [30000/55000 (55%)]\tLoss: 0.939734\n",
            "Train Epoch: 65 [40000/55000 (73%)]\tLoss: 1.591674\n",
            "Train Epoch: 65 [50000/55000 (91%)]\tLoss: 0.737897\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 66 [0/55000 (0%)]\tLoss: 0.971733\n",
            "Train Epoch: 66 [10000/55000 (18%)]\tLoss: 1.591553\n",
            "Train Epoch: 66 [20000/55000 (36%)]\tLoss: 1.227322\n",
            "Train Epoch: 66 [30000/55000 (55%)]\tLoss: 1.374563\n",
            "Train Epoch: 66 [40000/55000 (73%)]\tLoss: 1.625274\n",
            "Train Epoch: 66 [50000/55000 (91%)]\tLoss: 1.663330\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 67 [0/55000 (0%)]\tLoss: 1.096061\n",
            "Train Epoch: 67 [10000/55000 (18%)]\tLoss: 1.250142\n",
            "Train Epoch: 67 [20000/55000 (36%)]\tLoss: 0.781385\n",
            "Train Epoch: 67 [30000/55000 (55%)]\tLoss: 1.918005\n",
            "Train Epoch: 67 [40000/55000 (73%)]\tLoss: 0.751923\n",
            "Train Epoch: 67 [50000/55000 (91%)]\tLoss: 2.581486\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 68 [0/55000 (0%)]\tLoss: 2.387819\n",
            "Train Epoch: 68 [10000/55000 (18%)]\tLoss: 0.825515\n",
            "Train Epoch: 68 [20000/55000 (36%)]\tLoss: 1.060983\n",
            "Train Epoch: 68 [30000/55000 (55%)]\tLoss: 0.774441\n",
            "Train Epoch: 68 [40000/55000 (73%)]\tLoss: 0.700846\n",
            "Train Epoch: 68 [50000/55000 (91%)]\tLoss: 0.880721\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 69 [0/55000 (0%)]\tLoss: 1.622827\n",
            "Train Epoch: 69 [10000/55000 (18%)]\tLoss: 1.696220\n",
            "Train Epoch: 69 [20000/55000 (36%)]\tLoss: 2.960677\n",
            "Train Epoch: 69 [30000/55000 (55%)]\tLoss: 1.968896\n",
            "Train Epoch: 69 [40000/55000 (73%)]\tLoss: 0.826746\n",
            "Train Epoch: 69 [50000/55000 (91%)]\tLoss: 0.746533\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 70 [0/55000 (0%)]\tLoss: 0.770835\n",
            "Train Epoch: 70 [10000/55000 (18%)]\tLoss: 0.724653\n",
            "Train Epoch: 70 [20000/55000 (36%)]\tLoss: 0.754973\n",
            "Train Epoch: 70 [30000/55000 (55%)]\tLoss: 0.893863\n",
            "Train Epoch: 70 [40000/55000 (73%)]\tLoss: 1.329231\n",
            "Train Epoch: 70 [50000/55000 (91%)]\tLoss: 1.033445\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 71 [0/55000 (0%)]\tLoss: 0.633780\n",
            "Train Epoch: 71 [10000/55000 (18%)]\tLoss: 1.681337\n",
            "Train Epoch: 71 [20000/55000 (36%)]\tLoss: 1.083988\n",
            "Train Epoch: 71 [30000/55000 (55%)]\tLoss: 1.055771\n",
            "Train Epoch: 71 [40000/55000 (73%)]\tLoss: 0.733086\n",
            "Train Epoch: 71 [50000/55000 (91%)]\tLoss: 1.762547\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 72 [0/55000 (0%)]\tLoss: 3.789264\n",
            "Train Epoch: 72 [10000/55000 (18%)]\tLoss: 0.994180\n",
            "Train Epoch: 72 [20000/55000 (36%)]\tLoss: 2.432326\n",
            "Train Epoch: 72 [30000/55000 (55%)]\tLoss: 0.770411\n",
            "Train Epoch: 72 [40000/55000 (73%)]\tLoss: 1.543232\n",
            "Train Epoch: 72 [50000/55000 (91%)]\tLoss: 1.017215\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 73 [0/55000 (0%)]\tLoss: 1.380418\n",
            "Train Epoch: 73 [10000/55000 (18%)]\tLoss: 0.855369\n",
            "Train Epoch: 73 [20000/55000 (36%)]\tLoss: 0.636953\n",
            "Train Epoch: 73 [30000/55000 (55%)]\tLoss: 2.076124\n",
            "Train Epoch: 73 [40000/55000 (73%)]\tLoss: 2.008553\n",
            "Train Epoch: 73 [50000/55000 (91%)]\tLoss: 3.128460\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 74 [0/55000 (0%)]\tLoss: 1.483130\n",
            "Train Epoch: 74 [10000/55000 (18%)]\tLoss: 1.010277\n",
            "Train Epoch: 74 [20000/55000 (36%)]\tLoss: 2.917888\n",
            "Train Epoch: 74 [30000/55000 (55%)]\tLoss: 0.654276\n",
            "Train Epoch: 74 [40000/55000 (73%)]\tLoss: 1.571945\n",
            "Train Epoch: 74 [50000/55000 (91%)]\tLoss: 2.467246\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 75 [0/55000 (0%)]\tLoss: 0.679143\n",
            "Train Epoch: 75 [10000/55000 (18%)]\tLoss: 1.373254\n",
            "Train Epoch: 75 [20000/55000 (36%)]\tLoss: 1.205883\n",
            "Train Epoch: 75 [30000/55000 (55%)]\tLoss: 1.721091\n",
            "Train Epoch: 75 [40000/55000 (73%)]\tLoss: 0.843868\n",
            "Train Epoch: 75 [50000/55000 (91%)]\tLoss: 0.753557\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 76 [0/55000 (0%)]\tLoss: 0.544833\n",
            "Train Epoch: 76 [10000/55000 (18%)]\tLoss: 1.550426\n",
            "Train Epoch: 76 [20000/55000 (36%)]\tLoss: 0.918672\n",
            "Train Epoch: 76 [30000/55000 (55%)]\tLoss: 0.931931\n",
            "Train Epoch: 76 [40000/55000 (73%)]\tLoss: 0.703160\n",
            "Train Epoch: 76 [50000/55000 (91%)]\tLoss: 1.017422\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 77 [0/55000 (0%)]\tLoss: 0.702502\n",
            "Train Epoch: 77 [10000/55000 (18%)]\tLoss: 0.697279\n",
            "Train Epoch: 77 [20000/55000 (36%)]\tLoss: 1.171831\n",
            "Train Epoch: 77 [30000/55000 (55%)]\tLoss: 0.397973\n",
            "Train Epoch: 77 [40000/55000 (73%)]\tLoss: 0.729559\n",
            "Train Epoch: 77 [50000/55000 (91%)]\tLoss: 5.580601\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 78 [0/55000 (0%)]\tLoss: 1.850622\n",
            "Train Epoch: 78 [10000/55000 (18%)]\tLoss: 0.784990\n",
            "Train Epoch: 78 [20000/55000 (36%)]\tLoss: 2.024994\n",
            "Train Epoch: 78 [30000/55000 (55%)]\tLoss: 0.541855\n",
            "Train Epoch: 78 [40000/55000 (73%)]\tLoss: 0.866795\n",
            "Train Epoch: 78 [50000/55000 (91%)]\tLoss: 4.546907\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 79 [0/55000 (0%)]\tLoss: 0.867752\n",
            "Train Epoch: 79 [10000/55000 (18%)]\tLoss: 0.549889\n",
            "Train Epoch: 79 [20000/55000 (36%)]\tLoss: 0.754465\n",
            "Train Epoch: 79 [30000/55000 (55%)]\tLoss: 1.983486\n",
            "Train Epoch: 79 [40000/55000 (73%)]\tLoss: 1.428512\n",
            "Train Epoch: 79 [50000/55000 (91%)]\tLoss: 1.144479\n",
            "save model..\n",
            "training boxes...\n",
            "Train Epoch: 80 [0/55000 (0%)]\tLoss: 0.673986\n",
            "Train Epoch: 80 [10000/55000 (18%)]\tLoss: 0.701691\n",
            "Train Epoch: 80 [20000/55000 (36%)]\tLoss: 0.918081\n",
            "Train Epoch: 80 [30000/55000 (55%)]\tLoss: 0.694112\n",
            "Train Epoch: 80 [40000/55000 (73%)]\tLoss: 0.842216\n",
            "Train Epoch: 80 [50000/55000 (91%)]\tLoss: 0.809904\n",
            "save model..\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "iSDI7o-FxU03",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_BPwfxXqb1V2",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import numpy as np\n",
        "import torchvision\n",
        "from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n",
        "from torchvision import models\n",
        "\n",
        "\n",
        "def classify_and_detect(images):\n",
        "    \"\"\"\n",
        "    Predict the two digit classes and bounding boxes for each image.\n",
        "\n",
        "    :param np.ndarray images: N x 4096 array containing N 64x64 images flattened into vectors\n",
        "    :return: np.ndarray, np.ndarray -- pred_class with shape [N, 2]\n",
        "        (labels sorted ascending, matching the ground-truth convention)\n",
        "        and pred_bboxes with shape [N, 2, 4]\n",
        "\n",
        "    Loads the pre-trained nets from disk and runs inference only --\n",
        "    no training here, to keep submission time low.\n",
        "    \"\"\"\n",
        "    N = images.shape[0]\n",
        "    print('N is '+str(N))\n",
        "\n",
        "    # Normalize to [0, 1] and restore the 64x64x1 image layout.\n",
        "    images = images.astype('float32')/255\n",
        "    images = np.reshape(images, [N, 64, 64, 1])\n",
        "    images = torch.tensor(images, dtype=torch.float32)\n",
        "\n",
        "    # TensorDataset needs a target tensor; the labels are unused at\n",
        "    # inference time, so a dummy vector of ones is enough.\n",
        "    aux_label = torch.ones(N, dtype=torch.float32)\n",
        "    images_dataset = utils.TensorDataset(images, aux_label)\n",
        "    images_loader = utils.DataLoader(images_dataset, batch_size=100, shuffle=False)\n",
        "\n",
        "    '''should change these before submission'''\n",
        "    PATH = '/content/drive/My Drive/Colab Notebooks/a10/cla_net.pth'\n",
        "    PATH_2 = '/content/drive/My Drive/Colab Notebooks/a10/box_net.pth'\n",
        "\n",
        "    # Inference on CPU so the saved weights load anywhere.\n",
        "    device = torch.device('cpu')\n",
        "    cla_net = Cla_Net().to(device)\n",
        "    cla_net.load_state_dict(torch.load(PATH, map_location=device))\n",
        "\n",
        "    box_net = Box_Net().to(device)\n",
        "    box_net.load_state_dict(torch.load(PATH_2, map_location=device))\n",
        "\n",
        "    pred_class_list = []  # per-image [2] label arrays, in dataset order\n",
        "    pred_box_list = []    # per-batch [b, 8] raw box-regression outputs\n",
        "\n",
        "    # Single pass over the data: run both networks on each batch.\n",
        "    with torch.no_grad():\n",
        "      for data, _ in images_loader:\n",
        "          data = data.to(device)\n",
        "          data = data.permute(0, 3, 1, 2)  # NHWC -> NCHW\n",
        "\n",
        "          b = data.size(0)  # actual batch size (last batch may be short)\n",
        "\n",
        "          # Classification: [b, 20] -> [b, 2, 10] (two digits, 10 classes each).\n",
        "          cla_out = cla_net(data).reshape(b, 2, 10).cpu()\n",
        "          for i in range(b):\n",
        "            p = cla_out.data[i].max(1, keepdim=True)[1]  # argmax label per digit\n",
        "            p = p.reshape(2)\n",
        "            p, _ = p.sort(descending=False)  # ground truth lists labels ascending\n",
        "            pred_class_list.append(p.numpy())\n",
        "\n",
        "          # Box regression: [b, 8] == two boxes of 4 coordinates per image.\n",
        "          pred_box_list.append(box_net(data).cpu().numpy())\n",
        "\n",
        "    '''final processing before return'''\n",
        "    # pred_class: predicted labels for the 2 digits, shape [N, 2]\n",
        "    pred_class = np.asarray(pred_class_list, dtype=np.int32).reshape(N, 2)\n",
        "\n",
        "    # pred_box: predicted bboxes for the 2 digits, shape [N, 2, 4]\n",
        "    pred_box = np.concatenate(pred_box_list, axis=0).astype(np.float64)\n",
        "    pred_box = pred_box.reshape(N, 2, 4)\n",
        "    print(pred_box.shape)\n",
        "\n",
        "    return pred_class, pred_box"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "rLltTWeab5IE",
        "colab_type": "code",
        "outputId": "51e20823-1fd1-45fd-f4d6-8d3ec98670a6",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        }
      },
      "source": [
        "import time\n",
        "import numpy as np\n",
        "from skimage.draw import polygon\n",
        "\n",
        "#from A10_submission import classify_and_detect\n",
        "\n",
        "\n",
        "def compute_classification_acc(pred, gt):\n",
        "    # pred and gt are both\n",
        "    \n",
        "    assert pred.shape == gt.shape\n",
        "    return (pred == gt).astype(int).sum() / gt.size\n",
        "\n",
        "\n",
        "def compute_iou(b_pred, b_gt):\n",
        "    # b_pred: predicted bounding boxes, shape=(n,2,4)\n",
        "    # b_gt: ground truth bounding boxes, shape=(n,2,4)\n",
        "    # Returns the mean IoU over all 2*n boxes, computed by rasterizing\n",
        "    # each box onto a 64x64 grid.\n",
        "    # NOTE(review): indices 0/2 are used as rows and 1/3 as columns,\n",
        "    # i.e. boxes appear to be [r_min, c_min, r_max, c_max] -- confirm.\n",
        "\n",
        "    n = np.shape(b_gt)[0]\n",
        "    mask_pred = np.zeros((64, 64))\n",
        "    mask_gt = np.zeros((64, 64))\n",
        "    total_iou = 0.0\n",
        "    for i in range(n):\n",
        "        for b in range(2):\n",
        "            # Fill the predicted box on its grid.\n",
        "            rr, cc = polygon([b_pred[i, b, 0], b_pred[i, b, 0], b_pred[i, b, 2], b_pred[i, b, 2]],\n",
        "                             [b_pred[i, b, 1], b_pred[i, b, 3], b_pred[i, b, 3], b_pred[i, b, 1]], [64, 64])\n",
        "            mask_pred[rr, cc] = 1\n",
        "\n",
        "            # Fill the ground-truth box on its grid.\n",
        "            rr, cc = polygon([b_gt[i, b, 0], b_gt[i, b, 0], b_gt[i, b, 2], b_gt[i, b, 2]],\n",
        "                             [b_gt[i, b, 1], b_gt[i, b, 3], b_gt[i, b, 3], b_gt[i, b, 1]], [64, 64])\n",
        "            mask_gt[rr, cc] = 1\n",
        "\n",
        "            # Intersection = cells covered twice; union = cells covered at least once.\n",
        "            overlap = mask_pred + mask_gt\n",
        "            total_iou += (1.0 / (2 * n)) * (np.sum(overlap == 2) / np.sum(overlap >= 1))\n",
        "\n",
        "            # Clear both grids for the next box pair.\n",
        "            mask_pred[:, :] = 0\n",
        "            mask_gt[:, :] = 0\n",
        "\n",
        "    return total_iou\n",
        "\n",
        "\n",
        "def main():\n",
        "    # Evaluate classify_and_detect() on a saved split and report\n",
        "    # classification accuracy, bbox IoU and wall-clock prediction time.\n",
        "    # prefix = \"test\"\n",
        "    prefix = \"valid\"\n",
        "\n",
        "    images = np.load(prefix + \"_X.npy\")\n",
        "\n",
        "    # Time only the prediction step, not the metric computation.\n",
        "    start_t = time.time()\n",
        "    pred_class, pred_bboxes = classify_and_detect(images)\n",
        "    time_taken = time.time() - start_t\n",
        "\n",
        "    gt_class = np.load(prefix + \"_Y.npy\")\n",
        "    gt_bboxes = np.load(prefix + \"_bboxes.npy\")\n",
        "    acc = compute_classification_acc(pred_class, gt_class)\n",
        "    iou = compute_iou(pred_bboxes, gt_bboxes)\n",
        "\n",
        "    print(f\"Classification Acc: {acc}\")\n",
        "    print(f\"BBoxes IOU: {iou}\")\n",
        "    print(f\"Test time: {time_taken}\")\n",
        "\n",
        "\n",
        "if __name__ == '__main__':\n",
        "    main()\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "N is 5000\n",
            "(5000, 2, 4)\n",
            "Classification Acc: 0.9879\n",
            "BBoxes IOU: 0.8676772013268703\n",
            "Test time: 179.82431626319885\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MQniFImf_ryU",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DKzCpMGjSqu_",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# a = torch.randn(128,8,requires_grad=True)\n",
        "# b = torch.randn(128,8)\n",
        "# #l = torch.nn.MSELoss(a, b)\n",
        "# #l = nn.CrossEntropyLoss(a,b)\n",
        "\n",
        "# c = torch.tensor(([1,2,3],[4,5,6]))\n",
        "# #c = c.view(-1,6)\n",
        "# b.shape\n",
        "# #c.unsqueeze(0)\n",
        "# #a[0].shape\n",
        "\n",
        "# loss = nn.MSELoss()\n",
        "# input = torch.randn(128, 8, requires_grad=True)\n",
        "# target = torch.randn(128, 8)\n",
        "# output = loss(input, target)\n",
        "# input.shape\n",
        "\n",
        "# a = torch.randint(10,(128,2,10))\n",
        "# #one = torch.nn.functional.one_hot(a,num_classes=10)\n",
        "# #torch.arange(0, 5) #% 3\n",
        "# b=torch.tensor(([3,4],[4,9],[2,2]))\n",
        "# print(a.shape)\n",
        "# #a = torch.nn.functional.one_hot(a.long(),num_classes=10)\n",
        "# #b.shape\n",
        "# a.shape\n",
        "# #print(a.data.max(1, keepdim=True)[0])\n",
        "\n",
        "\n",
        "# print(a)\n",
        "# print(a[1])\n",
        "# print(a[0][1])\n",
        "# #pred_class = np.empty((N, 2), dtype=np.int32)\n",
        "# lab = torch.argmax(a[0][0])\n",
        "# #print(lab.item())\n",
        "# #for row in a:\n",
        "# #  print(row[0])\n",
        "# pred_one_img = [] #np.zeros(128*2)\n",
        "# pred_class = []\n",
        "# for i in range(128):\n",
        "#   #print('i '+str(i))\n",
        "#   for j in range(2):\n",
        "#     #print(j)\n",
        "#     single_label = torch.argmax(a[i][j]).item()\n",
        "#     #print(single_label)\n",
        "#     #print(type(pred_one_img))\n",
        "#     pred_one_img.append(single_label)\n",
        "#     #pred_one_img.append\n",
        "\n",
        "#   #print(pred_one_img)\n",
        "#   #pred_class.append(pred_one_img)\n",
        "\n",
        "# #print(len(pred_one_img))\n",
        "# pred_class = np.asarray(pred_one_img)\n",
        "# pred_class = pred_class.reshape(128,2)\n",
        "# #print(pred_class.shape)\n",
        "\n",
        "# p = a.data[0].max(1, keepdim=True)[1]\n",
        "# p2 = a.data[1].max(1, keepdim=True)[1]\n",
        "# p=p.reshape(2)\n",
        "# p2 = p2.reshape(2)\n",
        "# #p[0].data.item()\n",
        "# p,_ = p.sort(descending=False)\n",
        "# #p.view(-1,2)\n",
        "# p=p.numpy()[:]\n",
        "\n",
        "# p\n",
        "\n",
        "\n",
        "\n",
        "\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "99-TMCEA0UQP",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# # a = np.ones(10)\n",
        "# # b = np.array([2,3])\n",
        "# # new = np.concatenate((a,b), axis=0)\n",
        "# # new.reshape(3,4)\n",
        "# # aa = []\n",
        "# # for i in range(10):\n",
        "# #   aa.append(i)\n",
        "\n",
        "# # print(aa)\n",
        "\n",
        "# # ab = np.ones((10,500,2))\n",
        "# # ab.shape\n",
        "# # ab[0].reshape(500,2)\n",
        "\n",
        "\n",
        "# d = np.ones((100,2,4))\n",
        "# c = np.zeros((100,2,4))\n",
        "# #cc = np.concatenate(d,c, axis=0)\n",
        "# #cc.shape\n",
        "# a = [np.array([1,2,3])]\n",
        "# def removearray(L,arr):\n",
        "#     ind = 0\n",
        "#     size = len(L)\n",
        "#     while ind != size and not np.array_equal(L[ind],arr):\n",
        "#         ind += 1\n",
        "#     if ind != size:\n",
        "#         L.pop(ind)\n",
        "#     else:\n",
        "#         raise ValueError('array not found in list.')\n",
        "# nparr = np.ones(2)\n",
        "# #removearray(a,nparr)\n",
        "# a[0]"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}