{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "import sys\n",
        "import tempfile\n",
        "import torch\n",
        "import torch.distributed as dist\n",
        "import torch.nn as nn\n",
        "import torch.optim as optim\n",
        "import torch.multiprocessing as mp"
      ],
      "metadata": {
        "id": "IgVogeOfrEeZ"
      },
      "execution_count": 2,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from torch.nn.parallel import DistributedDataParallel as DDP\n"
      ],
      "metadata": {
        "id": "rV7DU8FJrMe7"
      },
      "execution_count": 3,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def setup(rank, world_size, master_addr='localhost', master_port='55555'):\n",
        "    \"\"\"Initialize the default process group for this rank.\n",
        "\n",
        "    rank: index of this process; world_size: total number of processes.\n",
        "    master_addr/master_port default to the previously hard-coded values so\n",
        "    existing callers keep working, but can now be overridden.\n",
        "    \"\"\"\n",
        "    os.environ['MASTER_ADDR'] = master_addr\n",
        "    os.environ['MASTER_PORT'] = master_port\n",
        "\n",
        "    # initialize the process group; gloo works on CPU-only machines\n",
        "    dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n",
        "\n",
        "def cleanup():\n",
        "    \"\"\"Tear down the default process group created by setup().\"\"\"\n",
        "    dist.destroy_process_group()"
      ],
      "metadata": {
        "id": "-4H7CJucrMal"
      },
      "execution_count": 4,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class NNET_Model(nn.Module):\n",
        "    \"\"\"Small feed-forward net: Linear(10->10) -> ReLU -> Linear(10->5).\"\"\"\n",
        "\n",
        "    def __init__(self):\n",
        "        super(NNET_Model, self).__init__()\n",
        "        self.net1 = nn.Linear(10, 10)\n",
        "        self.relu = nn.ReLU()\n",
        "        self.net2 = nn.Linear(10, 5)\n",
        "\n",
        "    def forward(self, x):\n",
        "        # Apply the layers one at a time instead of nesting the calls.\n",
        "        hidden = self.net1(x)\n",
        "        activated = self.relu(hidden)\n",
        "        return self.net2(activated)"
      ],
      "metadata": {
        "id": "K2SJNxiXrMX-"
      },
      "execution_count": 5,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def nnet_basic(rank, world_size):\n",
        "    \"\"\"Run one DDP forward/backward/step on the given rank.\n",
        "\n",
        "    Must be executed by `world_size` cooperating processes;\n",
        "    init_process_group blocks until every rank has joined.\n",
        "    \"\"\"\n",
        "    print(f\"Running basic DDP example on rank {rank}.\")\n",
        "    setup(rank, world_size)\n",
        "\n",
        "    # Use the GPU matching this rank when one exists; otherwise stay on\n",
        "    # the CPU.  The original unconditionally did .to(rank) and\n",
        "    # device_ids=[rank], which fails on a CPU-only runtime (a later cell\n",
        "    # in this notebook prints 'No GPU. switching to CPU').\n",
        "    use_cuda = torch.cuda.is_available()\n",
        "    device = torch.device(f'cuda:{rank}') if use_cuda else torch.device('cpu')\n",
        "    model = NNET_Model().to(device)\n",
        "    # device_ids must be None for CPU modules\n",
        "    ddp_model = DDP(model, device_ids=[rank] if use_cuda else None)\n",
        "\n",
        "    loss_fn = nn.MSELoss()\n",
        "    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n",
        "\n",
        "    optimizer.zero_grad()\n",
        "    # Inputs and labels must live on the same device as the model.\n",
        "    outputs = ddp_model(torch.randn(20, 10).to(device))\n",
        "    labels = torch.randn(20, 5).to(device)\n",
        "    loss_fn(outputs, labels).backward()\n",
        "    optimizer.step()\n",
        "\n",
        "    cleanup()"
      ],
      "metadata": {
        "id": "9vN7UfCXrbfg"
      },
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Calling nnet_basic directly with rank=1, world_size=4 deadlocks: this\n",
        "# single notebook process waits forever for three other ranks that never\n",
        "# start (see the interrupted run recorded above).  A single-process\n",
        "# 'group' of size 1 completes immediately.\n",
        "nnet_basic(rank=0, world_size=1)"
      ],
      "metadata": {
        "id": "8nia8KK5x7__",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 443
        },
        "outputId": "fdebdb65-d0a2-4031-eeba-401a9e4cbc6c"
      },
      "execution_count": 7,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Running basic DDP example on rank 1.\n"
          ]
        },
        {
          "output_type": "error",
          "ename": "KeyboardInterrupt",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
            "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py\u001b[0m in \u001b[0;36mrun_code\u001b[0;34m(self, code_obj, result, async_)\u001b[0m\n\u001b[1;32m   3325\u001b[0m                 \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3326\u001b[0;31m                     \u001b[0mexec\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcode_obj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muser_global_ns\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muser_ns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   3327\u001b[0m             \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m<ipython-input-7-9755f1cee42d>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mnnet_basic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mworld_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m4\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
            "\u001b[0;32m<ipython-input-6-a18f9a1ad61e>\u001b[0m in \u001b[0;36mnnet_basic\u001b[0;34m(rank, world_size)\u001b[0m\n\u001b[1;32m      2\u001b[0m     \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"Running basic DDP example on rank {rank}.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m     \u001b[0msetup\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mworld_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m<ipython-input-4-bf95e9f8e9ef>\u001b[0m in \u001b[0;36msetup\u001b[0;34m(rank, world_size)\u001b[0m\n\u001b[1;32m      5\u001b[0m     \u001b[0;31m# initialize the process group\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m     \u001b[0mdist\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minit_process_group\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"gloo\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrank\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mworld_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mworld_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/distributed/distributed_c10d.py\u001b[0m in \u001b[0;36minit_process_group\u001b[0;34m(backend, init_method, timeout, world_size, rank, store, group_name, pg_options)\u001b[0m\n\u001b[1;32m    594\u001b[0m             )\n\u001b[0;32m--> 595\u001b[0;31m             \u001b[0mstore\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrank\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mworld_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrendezvous_iterator\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    596\u001b[0m             \u001b[0mstore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_timeout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/distributed/rendezvous.py\u001b[0m in \u001b[0;36m_env_rendezvous_handler\u001b[0;34m(url, timeout, **kwargs)\u001b[0m\n\u001b[1;32m    256\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 257\u001b[0;31m     \u001b[0mstore\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_create_c10d_store\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmaster_addr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmaster_port\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrank\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mworld_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    258\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/distributed/rendezvous.py\u001b[0m in \u001b[0;36m_create_c10d_store\u001b[0;34m(hostname, port, rank, world_size, timeout)\u001b[0m\n\u001b[1;32m    188\u001b[0m         return TCPStore(\n\u001b[0;32m--> 189\u001b[0;31m             \u001b[0mhostname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mport\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mworld_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstart_daemon\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmulti_tenant\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    190\u001b[0m         )\n",
            "\u001b[0;31mRuntimeError\u001b[0m: Interrupted system call",
            "\nDuring handling of the above exception, another exception occurred:\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import torch.distributed as dist\n",
        "def setup(rank, world_size):\n",
        "    \"\"\"Redefine setup: nccl when GPUs are present, gloo otherwise.\n",
        "\n",
        "    NOTE(review): this shadows the earlier `setup` definition.\n",
        "    \"\"\"\n",
        "    os.environ['MASTER_ADDR'] = 'localhost'\n",
        "    os.environ['MASTER_PORT'] = '12355'\n",
        "    # nccl requires CUDA; fall back to gloo so the CPU-only runtime\n",
        "    # (detected in a later cell of this notebook) can still initialize.\n",
        "    backend = \"nccl\" if torch.cuda.is_available() else \"gloo\"\n",
        "    dist.init_process_group(backend, rank=rank, world_size=world_size)"
      ],
      "metadata": {
        "id": "EIGukNI26Hm0"
      },
      "execution_count": 8,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import torchvision.datasets as datasets"
      ],
      "metadata": {
        "id": "-CqWfwB18LFk"
      },
      "execution_count": 9,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Download the MNIST train split to ./data; transform=None keeps the\n",
        "# raw PIL images (no tensor conversion).\n",
        "mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)\n"
      ],
      "metadata": {
        "id": "Kokmd9BC8dR-"
      },
      "execution_count": 10,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from torch.utils.data import DataLoader\n",
        "from torch.utils.data.distributed import DistributedSampler\n",
        "def prepare(rank, world_size, batch_size=32, pin_memory=False, num_workers=0):\n",
        "    \"\"\"Build a DataLoader over mnist_trainset that shards data across ranks.\n",
        "\n",
        "    DistributedSampler gives each rank a disjoint 1/world_size slice of the\n",
        "    dataset.  (DataLoader was previously only imported by a much later\n",
        "    cell, so calling this on a fresh kernel raised NameError.)\n",
        "    \"\"\"\n",
        "    dataset = mnist_trainset\n",
        "    sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=False, drop_last=False)\n",
        "\n",
        "    # shuffle must stay False here: the sampler controls ordering, and\n",
        "    # DataLoader forbids shuffle=True together with an explicit sampler.\n",
        "    dataloader = DataLoader(dataset, batch_size=batch_size, pin_memory=pin_memory, num_workers=num_workers, drop_last=False, shuffle=False, sampler=sampler)\n",
        "\n",
        "    return dataloader"
      ],
      "metadata": {
        "id": "WHfA5Z_m8f1i"
      },
      "execution_count": 11,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from torch.nn.parallel import DistributedDataParallel as DDP\n",
        "def main(rank, world_size):\n",
        "    \"\"\"Set up the process group, shard the data, and wrap the model in DDP.\"\"\"\n",
        "    # setup the process groups\n",
        "    setup(rank, world_size)\n",
        "    # prepare the dataloader\n",
        "    dataloader = prepare(rank, world_size)\n",
        "\n",
        "    # instantiate the model and move it to the right device\n",
        "    # (the original referenced an undefined name `Model`; NNET_Model is\n",
        "    # the model class defined earlier in this notebook)\n",
        "    model = NNET_Model().to(rank)\n",
        "\n",
        "    # device_ids tells DDP where your model lives\n",
        "    # output_device tells DDP where to put outputs, here the same rank\n",
        "    # find_unused_parameters=True lets DDP tolerate parameters that get no\n",
        "    # gradient in a forward pass\n",
        "    model = DDP(model, device_ids=[rank], output_device=rank, find_unused_parameters=True)\n",
        "    return model, dataloader"
      ],
      "metadata": {
        "id": "LDoaohJH8s5C"
      },
      "execution_count": 12,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Pick the compute device; record the available GPU ids when CUDA is\n",
        "# present (device_ids / gpus are only defined in the GPU branch).\n",
        "if not torch.cuda.is_available():\n",
        "    DEVICE = torch.device(\"cpu\")\n",
        "    print('No GPU. switching to CPU')\n",
        "else:\n",
        "    DEVICE = torch.device('cuda')\n",
        "    device_ids = list(range(torch.cuda.device_count()))\n",
        "    gpus = len(device_ids)\n",
        "    print('GPU detected')"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "MNtHXxlS9Oz4",
        "outputId": "76392b6f-bd6d-4bfb-cf6c-7f65a24cbc75"
      },
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "No GPU. switching to CPU\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import torch.quantization\n",
        "# Dynamic quantization of all nn.Linear modules to qint8.\n",
        "# NOTE(review): `model` is undefined at this point on a fresh run (the\n",
        "# recorded output is a NameError) — `main` is never called, so nothing\n",
        "# ever bound the name.  Bind a model first or quantize `net` once defined.\n",
        "quantized_model = torch.quantization.quantize_dynamic(model, \n",
        "                                                      {torch.nn.Linear}, \n",
        "                                                      dtype=torch.qint8)"
      ],
      "metadata": {
        "id": "gVsSRbI5_Ow2",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 222
        },
        "outputId": "547f8fe7-0636-4492-cd2e-6b13b88ef4e8"
      },
      "execution_count": 14,
      "outputs": [
        {
          "output_type": "error",
          "ename": "NameError",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-14-cd3d5034e54b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mquantization\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m quantized_model = torch.quantization.quantize_dynamic(model, \n\u001b[0m\u001b[1;32m      3\u001b[0m                                                       \u001b[0;34m{\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m                                                       dtype=torch.qint8)\n",
            "\u001b[0;31mNameError\u001b[0m: name 'model' is not defined"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# NOTE(review): fails on a fresh run — quantized_model was never created\n",
        "# because the previous cell raised NameError (see recorded output).\n",
        "print(quantized_model)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 169
        },
        "id": "83QZoAgDsnaJ",
        "outputId": "b0018ef6-a7b7-4a2b-de3f-158fae129db8"
      },
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "error",
          "ename": "NameError",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-15-c9116e1e5c89>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquantized_model\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m: name 'quantized_model' is not defined"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "\n",
        "# insert observers (eager-mode static quantization, step 1)\n",
        "# NOTE(review): `model` is undefined here on a fresh run (recorded output\n",
        "# is a NameError); prepare() also typically requires model.qconfig to be\n",
        "# set beforehand — confirm before enabling this cell.\n",
        "torch.quantization.prepare(model, inplace=True)\n",
        "# Calibrate the model and collect statistics"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 204
        },
        "id": "n-QtMDI6srRN",
        "outputId": "a50370d1-69e9-48bf-c715-11a5cf66dbfb"
      },
      "execution_count": 16,
      "outputs": [
        {
          "output_type": "error",
          "ename": "NameError",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-16-70516fca10e7>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;31m# insert observers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mquantization\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprepare\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minplace\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      3\u001b[0m \u001b[0;31m# Calibrate the model and collect statistics\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mNameError\u001b[0m: name 'model' is not defined"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# convert to quantized version\n",
        "# NOTE(review): depends on `model`, which is never successfully defined\n",
        "# in this notebook; this cell was never executed (execution_count null).\n",
        "torch.quantization.convert(model, inplace=True)"
      ],
      "metadata": {
        "id": "elsovrdlu_I-"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# prepare QAT (quantization-aware training: inserts fake-quant modules)\n",
        "# NOTE(review): `model` is undefined on a fresh run; prepare_qat also\n",
        "# expects a model in train() mode — confirm before enabling this cell.\n",
        "torch.quantization.prepare_qat(model, inplace=True)\n",
        "\n"
      ],
      "metadata": {
        "id": "8cLgMUE2vGxR"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "\n",
        "# convert to quantized version, removing dropout, to check for accuracy\n",
        "# on each epoch.  (The original line wrapped badly and fused the comment\n",
        "# word 'epoch' into the variable name, producing `epochquantized_model`;\n",
        "# that name is kept as an alias so the later display cell still works.)\n",
        "quantized_model = torch.quantization.convert(model.eval(), \n",
        "                                             inplace=False)\n",
        "epochquantized_model = quantized_model"
      ],
      "metadata": {
        "id": "xpChPBvUwNZo"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Display the converted model (bare last expression uses the rich repr).\n",
        "epochquantized_model"
      ],
      "metadata": {
        "id": "GUtIFXFhwRYH"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from torch.quantization.observer import MinMaxObserver, MovingAverageMinMaxObserver, HistogramObserver\n",
        "# Two batches of standard-normal samples, each of shape (C, L) = (5, 5),\n",
        "# used below to exercise the different observer types.\n",
        "C, L = 5, 5\n",
        "normal = torch.distributions.normal.Normal(0,1)\n",
        "inputs = [normal.sample((C, L)), normal.sample((C, L))]\n",
        "print(inputs)"
      ],
      "metadata": {
        "id": "9b-arkJnwTjN"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Feed both input batches through each observer type, then report the\n",
        "# quantization parameters (scale, zero_point) that each one derived.\n",
        "observers = [MinMaxObserver(), MovingAverageMinMaxObserver(), HistogramObserver()]\n",
        "for obs in observers:\n",
        "    for batch in inputs:\n",
        "        obs(batch)\n",
        "    print(obs.__class__.__name__, obs.calculate_qparams())"
      ],
      "metadata": {
        "id": "T8S61eZTx8kd"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from torch.quantization.observer import MovingAveragePerChannelMinMaxObserver\n",
        "# Per-channel variant: qparams are computed separately along ch_axis=0,\n",
        "# i.e. one (scale, zero_point) pair per channel.\n",
        "obs = MovingAveragePerChannelMinMaxObserver(ch_axis=0)  # calculate qparams for all `C` channels separately\n",
        "for x in inputs: obs(x)\n",
        "print(obs.calculate_qparams())\n",
        "\n"
      ],
      "metadata": {
        "id": "DlyAREjDyLxR"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "from torch import nn\n",
        "\n",
        "# toy model, used below only as a target for the quantization APIs\n",
        "# NOTE(review): these layer shapes likely don't compose for a real\n",
        "# forward pass (Conv2d outputs 64 channels but Linear expects 16\n",
        "# features) — fine for quantize-API demos, but m(x) would fail; confirm.\n",
        "m = nn.Sequential(\n",
        "  nn.Conv2d(2, 64, (8,)),\n",
        "  nn.ReLU(),\n",
        "  nn.Linear(16,10),\n",
        "  nn.LSTM(10, 10))\n",
        "\n",
        "m.eval()\n"
      ],
      "metadata": {
        "id": "2FPWEQu-yjhZ"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "## EAGER MODE\n",
        "# Dynamic quantization: replaces the LSTM and Linear modules of `m` with\n",
        "# dynamically-quantized (qint8) equivalents; other layers are untouched.\n",
        "from torch.quantization import quantize_dynamic\n",
        "model_quantized = quantize_dynamic(\n",
        "    model=m, qconfig_spec={nn.LSTM, nn.Linear}, dtype=torch.qint8, inplace=False\n",
        ")\n",
        "print(model_quantized)"
      ],
      "metadata": {
        "id": "egYjSMd5zFzK"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "## FX MODE\n",
        "# Graph-mode (FX) quantization: insert observers with prepare_fx, then\n",
        "# lower the traced graph to a quantized module with convert_fx.\n",
        "from torch.quantization import quantize_fx\n",
        "qconfig_dict = {\"\": torch.quantization.default_dynamic_qconfig}  # An empty key denotes the default applied to all modules\n",
        "model_prepared = quantize_fx.prepare_fx(m, qconfig_dict)\n",
        "model_quantized = quantize_fx.convert_fx(model_prepared)"
      ],
      "metadata": {
        "id": "VQkZs1BOzOgC"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Show the observed (prepared) graph module.\n",
        "print(model_prepared)"
      ],
      "metadata": {
        "id": "pz6uisYzzSbJ"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Show the final quantized graph module.\n",
        "print((model_quantized))"
      ],
      "metadata": {
        "id": "yeTM8qM8zXxN"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "import torchvision\n",
        "import torchvision.transforms as transforms\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "import torch.optim as optim\n",
        "import os\n",
        "from torch.utils.data import DataLoader\n",
        "import torch.quantization\n",
        "from torch.quantization import QuantStub, DeQuantStub"
      ],
      "metadata": {
        "id": "_lvL_vWEzcjZ"
      },
      "execution_count": 17,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "transform = transforms.Compose(\n",
        "    [transforms.ToTensor(),\n",
        "     transforms.Normalize((0.5,), (0.5,))])\n",
        "\n",
        "# The runtime that produced the recorded output suggests at most 2\n",
        "# workers; num_workers=16 triggered a DataLoader warning about possible\n",
        "# slowness/freezes, so use 2.\n",
        "NUM_WORKERS = 2\n",
        "\n",
        "trainset = torchvision.datasets.MNIST(root='./data', train=True,\n",
        "                                        download=True, transform=transform)\n",
        "trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,\n",
        "                                          shuffle=True, num_workers=NUM_WORKERS, pin_memory=True)\n",
        "\n",
        "testset = torchvision.datasets.MNIST(root='./data', train=False,\n",
        "                                       download=True, transform=transform)\n",
        "testloader = torch.utils.data.DataLoader(testset, batch_size=64,\n",
        "                                         shuffle=False, num_workers=NUM_WORKERS, pin_memory=True)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "zr0Kz0qizxrB",
        "outputId": "1d62cee1-b9c8-4e38-863a-6514094aa47b"
      },
      "execution_count": 18,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:566: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
            "  cpuset_checked))\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "class AverageMeter(object):\n",
        "    \"\"\"Tracks the latest value and a running (weighted) average.\"\"\"\n",
        "\n",
        "    def __init__(self, name, fmt=':f'):\n",
        "        self.name = name\n",
        "        self.fmt = fmt\n",
        "        self.reset()\n",
        "\n",
        "    def reset(self):\n",
        "        \"\"\"Clear all accumulated statistics.\"\"\"\n",
        "        self.val = self.avg = self.sum = self.count = 0\n",
        "\n",
        "    def update(self, val, n=1):\n",
        "        \"\"\"Record `val` observed `n` times and refresh the average.\"\"\"\n",
        "        self.val = val\n",
        "        self.sum += n * val\n",
        "        self.count += n\n",
        "        self.avg = self.sum / self.count\n",
        "\n",
        "    def __str__(self):\n",
        "        # e.g. with fmt=':f': 'loss 0.123456 (0.234567)'\n",
        "        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)\n",
        "        return template.format(**self.__dict__)\n"
      ],
      "metadata": {
        "id": "A52LnTnyz8J5"
      },
      "execution_count": 19,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def accuracy(output, target):\n",
        "    \"\"\"Return top-1 accuracy (percent) for one batch.\n",
        "\n",
        "    output: (batch, num_classes) scores; target: (batch,) class indices.\n",
        "    \"\"\"\n",
        "    with torch.no_grad():\n",
        "        batch_size = target.size(0)\n",
        "\n",
        "        # top-1 prediction per sample, transposed to (1, batch) so it can\n",
        "        # be compared against the broadcast targets\n",
        "        _, pred = output.topk(1, 1, True, True)\n",
        "        pred = pred.t()\n",
        "        correct = pred.eq(target.view(1, -1).expand_as(pred))\n",
        "\n",
        "        # removed an unused `res = []` left over from a multi-k version;\n",
        "        # reshape (not view) is safe on the sliced, possibly\n",
        "        # non-contiguous tensor\n",
        "        correct_one = correct[:1].reshape(-1).float().sum(0, keepdim=True)\n",
        "        return correct_one.mul_(100.0 / batch_size).item()\n"
      ],
      "metadata": {
        "id": "wBk2QBJF0J3C"
      },
      "execution_count": 20,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def print_size_of_model(model):\n",
        "    \"\"\" Prints the on-disk size of the model's state_dict in MB \"\"\"\n",
        "    torch.save(model.state_dict(), \"temp.p\")\n",
        "    print('Size (MB):', os.path.getsize(\"temp.p\")/1e6)\n",
        "    os.remove('temp.p')\n",
        "\n",
        "def load_model(quantized_model, model):\n",
        "    \"\"\" Loads the weights of `model` into an object meant for quantization \"\"\"\n",
        "    # Move to CPU *before* taking the state_dict so the copied tensors are\n",
        "    # CPU tensors (the original grabbed the state_dict first, so a CUDA\n",
        "    # model's weights would have been copied as CUDA tensors).\n",
        "    model = model.to('cpu')\n",
        "    state_dict = model.state_dict()\n",
        "    quantized_model.load_state_dict(state_dict)\n",
        "\n",
        "def fuse_modules(model):\n",
        "    \"\"\" Fuse together convolution/linear layers and their ReLUs \"\"\"\n",
        "    torch.quantization.fuse_modules(model, [['conv1', 'relu1'], \n",
        "                                            ['conv2', 'relu2'],\n",
        "                                            ['fc1', 'relu3'],\n",
        "                                            ['fc2', 'relu4']], inplace=True)"
      ],
      "metadata": {
        "id": "dBY2P_Q10Mui"
      },
      "execution_count": 21,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class Net(nn.Module):\n",
        "    \"\"\"LeNet-style CNN for MNIST; `q` toggles Quant/DeQuant stubs.\"\"\"\n",
        "\n",
        "    def __init__(self, q = False):\n",
        "        # By turning on q we can turn on/off the quantization stubs\n",
        "        super(Net, self).__init__()\n",
        "        self.conv1 = nn.Conv2d(1, 6, 5, bias=False)\n",
        "        self.relu1 = nn.ReLU()\n",
        "        self.pool1 = nn.MaxPool2d(2, 2)\n",
        "        self.conv2 = nn.Conv2d(6, 16, 5, bias=False)\n",
        "        self.relu2 = nn.ReLU()\n",
        "        self.pool2 = nn.MaxPool2d(2, 2)\n",
        "        self.fc1 = nn.Linear(256, 120, bias=False)\n",
        "        self.relu3 = nn.ReLU()\n",
        "        self.fc2 = nn.Linear(120, 84, bias=False)\n",
        "        self.relu4 = nn.ReLU()\n",
        "        self.fc3 = nn.Linear(84, 10, bias=False)\n",
        "        self.q = q\n",
        "        if q:\n",
        "            self.quant = QuantStub()\n",
        "            self.dequant = DeQuantStub()\n",
        "\n",
        "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
        "        if self.q:\n",
        "            x = self.quant(x)\n",
        "        # two conv -> relu -> pool stages\n",
        "        x = self.pool1(self.relu1(self.conv1(x)))\n",
        "        x = self.pool2(self.relu2(self.conv2(x)))\n",
        "        # Be careful to use reshape here instead of view\n",
        "        x = x.reshape(x.shape[0], -1)\n",
        "        # fully-connected classifier head\n",
        "        x = self.relu3(self.fc1(x))\n",
        "        x = self.relu4(self.fc2(x))\n",
        "        x = self.fc3(x)\n",
        "        if self.q:\n",
        "            x = self.dequant(x)\n",
        "        return x"
      ],
      "metadata": {
        "id": "8VOZGlH10Q3D"
      },
      "execution_count": 22,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Instantiate the unquantized network and report its serialized size.\n",
        "net = Net(q=False)\n",
        "print_size_of_model(net)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "HVnA2ewY0VEE",
        "outputId": "c5c24a24-8863-4433-dd42-4aab4d119c67"
      },
      "execution_count": 23,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Size (MB): 0.178587\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "def train(model: nn.Module, dataloader: DataLoader, cuda=False, q=False, epochs=20):\n",
        "    \"\"\"Train `model` with SGD + cross-entropy, printing running stats.\n",
        "\n",
        "    cuda: move each batch to the GPU before the forward pass.\n",
        "    q: after epoch 3, freeze quantization observers (QAT schedule).\n",
        "    epochs: number of passes over the data (was hard-coded to 20).\n",
        "    \"\"\"\n",
        "    criterion = nn.CrossEntropyLoss()\n",
        "    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n",
        "    model.train()\n",
        "    for epoch in range(epochs):  # loop over the dataset multiple times\n",
        "\n",
        "        running_loss = AverageMeter('loss')\n",
        "        acc = AverageMeter('train_acc')\n",
        "        for i, data in enumerate(dataloader, 0):\n",
        "            # get the inputs; data is a list of [inputs, labels]\n",
        "            inputs, labels = data\n",
        "            if cuda:\n",
        "                inputs = inputs.cuda()\n",
        "                labels = labels.cuda()\n",
        "\n",
        "            # zero the parameter gradients\n",
        "            optimizer.zero_grad()\n",
        "\n",
        "            # For QAT: stop updating observer statistics after a few\n",
        "            # epochs so the quantization parameters stabilize.\n",
        "            if epoch >= 3 and q:\n",
        "                model.apply(torch.quantization.disable_observer)\n",
        "\n",
        "            # forward + backward + optimize\n",
        "            outputs = model(inputs)\n",
        "            loss = criterion(outputs, labels)\n",
        "            loss.backward()\n",
        "            optimizer.step()\n",
        "\n",
        "            # track statistics\n",
        "            running_loss.update(loss.item(), outputs.shape[0])\n",
        "            acc.update(accuracy(outputs, labels), outputs.shape[0])\n",
        "            if i % 100 == 0:    # print every 100 mini-batches\n",
        "                print('[%d, %5d] ' %\n",
        "                    (epoch + 1, i + 1), running_loss, acc)\n",
        "    print('Finished Training')\n"
      ],
      "metadata": {
        "id": "tnocNGTG0Xy2"
      },
      "execution_count": 24,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def test(model: nn.Module, dataloader: DataLoader, cuda=False) -> float:\n",
        "    '''Return top-1 accuracy (percent, 0-100) of `model` over `dataloader`.\n",
        "\n",
        "    Runs in eval mode with gradients disabled; if `cuda` is True, each\n",
        "    batch is moved to the GPU before the forward pass.\n",
        "    '''\n",
        "    correct = 0\n",
        "    total = 0\n",
        "    model.eval()\n",
        "    with torch.no_grad():\n",
        "        for data in dataloader:\n",
        "            inputs, labels = data\n",
        "\n",
        "            if cuda:\n",
        "              inputs = inputs.cuda()\n",
        "              labels = labels.cuda()\n",
        "\n",
        "            outputs = model(inputs)\n",
        "            # predicted class = index of the largest logit per row\n",
        "            _, predicted = torch.max(outputs.data, 1)\n",
        "            total += labels.size(0)\n",
        "            correct += (predicted == labels).sum().item()\n",
        "    \n",
        "    return 100 * correct / total"
      ],
      "metadata": {
        "id": "4f5j3hcv01ON"
      },
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Train the FP32 baseline network (20 epochs, CPU)\n",
        "train(net, trainloader)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "MG0fRJih05BI",
        "outputId": "be621098-b917-4660-cbf8-27bbb9845c23"
      },
      "execution_count": 26,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "[1,     1]  loss 2.307441 (2.307441) train_acc 7.812500 (7.812500)\n",
            "[1,   101]  loss 2.300047 (2.303994) train_acc 18.750000 (10.210396)\n",
            "[1,   201]  loss 2.296141 (2.301046) train_acc 15.625000 (13.611629)\n",
            "[1,   301]  loss 2.286289 (2.298150) train_acc 31.250000 (16.808555)\n",
            "[1,   401]  loss 2.284664 (2.294502) train_acc 17.187500 (19.661783)\n",
            "[1,   501]  loss 2.255574 (2.289048) train_acc 37.500000 (22.386477)\n",
            "[1,   601]  loss 2.231334 (2.279399) train_acc 28.125000 (24.779014)\n",
            "[1,   701]  loss 2.066952 (2.256564) train_acc 32.812500 (26.658345)\n",
            "[1,   801]  loss 1.360105 (2.189891) train_acc 57.812500 (28.938436)\n",
            "[1,   901]  loss 0.803967 (2.081595) train_acc 79.687500 (32.498613)\n",
            "[2,     1]  loss 0.955617 (0.955617) train_acc 70.312500 (70.312500)\n",
            "[2,   101]  loss 0.593146 (0.760600) train_acc 84.375000 (76.392327)\n",
            "[2,   201]  loss 0.481754 (0.666202) train_acc 81.250000 (79.228856)\n",
            "[2,   301]  loss 0.334634 (0.594760) train_acc 90.625000 (81.359012)\n",
            "[2,   401]  loss 0.379376 (0.544992) train_acc 85.937500 (83.015118)\n",
            "[2,   501]  loss 0.215820 (0.500436) train_acc 92.187500 (84.428019)\n",
            "[2,   601]  loss 0.574859 (0.469746) train_acc 81.250000 (85.375936)\n",
            "[2,   701]  loss 0.204049 (0.441703) train_acc 93.750000 (86.285218)\n",
            "[2,   801]  loss 0.177470 (0.417470) train_acc 96.875000 (87.035737)\n",
            "[2,   901]  loss 0.178617 (0.400098) train_acc 96.875000 (87.579772)\n",
            "[3,     1]  loss 0.125772 (0.125772) train_acc 95.312500 (95.312500)\n",
            "[3,   101]  loss 0.160456 (0.225005) train_acc 93.750000 (92.806312)\n",
            "[3,   201]  loss 0.222082 (0.211408) train_acc 93.750000 (93.283582)\n",
            "[3,   301]  loss 0.204903 (0.212048) train_acc 92.187500 (93.402201)\n",
            "[3,   401]  loss 0.297458 (0.204181) train_acc 92.187500 (93.746103)\n",
            "[3,   501]  loss 0.118409 (0.199651) train_acc 98.437500 (93.831088)\n",
            "[3,   601]  loss 0.217516 (0.195058) train_acc 93.750000 (94.043781)\n",
            "[3,   701]  loss 0.128557 (0.190948) train_acc 95.312500 (94.182418)\n",
            "[3,   801]  loss 0.267558 (0.189668) train_acc 90.625000 (94.218165)\n",
            "[3,   901]  loss 0.056644 (0.185446) train_acc 100.000000 (94.336154)\n",
            "[4,     1]  loss 0.039763 (0.039763) train_acc 100.000000 (100.000000)\n",
            "[4,   101]  loss 0.104011 (0.147160) train_acc 95.312500 (95.931312)\n",
            "[4,   201]  loss 0.198893 (0.144352) train_acc 96.875000 (95.732276)\n",
            "[4,   301]  loss 0.226336 (0.143479) train_acc 95.312500 (95.743355)\n",
            "[4,   401]  loss 0.174057 (0.144925) train_acc 92.187500 (95.635910)\n",
            "[4,   601]  loss 0.074265 (0.138408) train_acc 96.875000 (95.785670)\n",
            "[4,   701]  loss 0.118419 (0.137942) train_acc 96.875000 (95.791726)\n",
            "[4,   801]  loss 0.094641 (0.136691) train_acc 96.875000 (95.827481)\n",
            "[4,   901]  loss 0.071122 (0.135026) train_acc 96.875000 (95.872642)\n",
            "[5,     1]  loss 0.062921 (0.062921) train_acc 100.000000 (100.000000)\n",
            "[5,   101]  loss 0.103457 (0.114784) train_acc 93.750000 (96.364480)\n",
            "[5,   201]  loss 0.055498 (0.110326) train_acc 98.437500 (96.540734)\n",
            "[5,   301]  loss 0.077907 (0.110013) train_acc 96.875000 (96.547965)\n",
            "[5,   401]  loss 0.055279 (0.111774) train_acc 98.437500 (96.477556)\n",
            "[5,   501]  loss 0.186656 (0.112124) train_acc 92.187500 (96.497630)\n",
            "[5,   601]  loss 0.061598 (0.112022) train_acc 98.437500 (96.513623)\n",
            "[5,   701]  loss 0.202779 (0.110696) train_acc 95.312500 (96.556259)\n",
            "[5,   801]  loss 0.108798 (0.110202) train_acc 98.437500 (96.598002)\n",
            "[5,   901]  loss 0.103532 (0.108981) train_acc 95.312500 (96.663430)\n",
            "[6,     1]  loss 0.202697 (0.202697) train_acc 95.312500 (95.312500)\n",
            "[6,   101]  loss 0.062813 (0.099797) train_acc 98.437500 (97.014233)\n",
            "[6,   201]  loss 0.027082 (0.098597) train_acc 100.000000 (97.007152)\n",
            "[6,   301]  loss 0.094634 (0.099069) train_acc 96.875000 (97.035922)\n",
            "[6,   401]  loss 0.152566 (0.099163) train_acc 93.750000 (97.054239)\n",
            "[6,   501]  loss 0.088397 (0.098222) train_acc 98.437500 (97.040294)\n",
            "[6,   601]  loss 0.122729 (0.097959) train_acc 95.312500 (97.028390)\n",
            "[6,   701]  loss 0.077927 (0.096625) train_acc 98.437500 (97.077835)\n",
            "[6,   801]  loss 0.108721 (0.095925) train_acc 96.875000 (97.077871)\n",
            "[6,   901]  loss 0.056916 (0.094402) train_acc 98.437500 (97.145533)\n",
            "[7,     1]  loss 0.020722 (0.020722) train_acc 100.000000 (100.000000)\n",
            "[7,   101]  loss 0.038244 (0.083343) train_acc 100.000000 (97.617574)\n",
            "[7,   201]  loss 0.021240 (0.085649) train_acc 100.000000 (97.489117)\n",
            "[7,   301]  loss 0.104951 (0.084510) train_acc 96.875000 (97.425249)\n",
            "[7,   401]  loss 0.047119 (0.086712) train_acc 98.437500 (97.397132)\n",
            "[7,   501]  loss 0.218136 (0.085530) train_acc 92.187500 (97.423902)\n",
            "[7,   601]  loss 0.038785 (0.084620) train_acc 100.000000 (97.433964)\n",
            "[7,   701]  loss 0.345704 (0.084578) train_acc 95.312500 (97.396576)\n",
            "[7,   801]  loss 0.196896 (0.083831) train_acc 92.187500 (97.419242)\n",
            "[7,   901]  loss 0.156493 (0.084492) train_acc 95.312500 (97.393521)\n",
            "[8,     1]  loss 0.018892 (0.018892) train_acc 100.000000 (100.000000)\n",
            "[8,   101]  loss 0.069294 (0.082367) train_acc 98.437500 (97.400990)\n",
            "[8,   201]  loss 0.118025 (0.074977) train_acc 96.875000 (97.784515)\n",
            "[8,   301]  loss 0.071061 (0.074512) train_acc 96.875000 (97.752284)\n",
            "[8,   401]  loss 0.018875 (0.075267) train_acc 100.000000 (97.689370)\n",
            "[8,   501]  loss 0.088267 (0.075851) train_acc 98.437500 (97.632859)\n",
            "[8,   601]  loss 0.051398 (0.074771) train_acc 98.437500 (97.686148)\n",
            "[8,   701]  loss 0.106481 (0.074657) train_acc 96.875000 (97.704173)\n",
            "[8,   801]  loss 0.129811 (0.074460) train_acc 95.312500 (97.713795)\n",
            "[8,   901]  loss 0.039220 (0.075225) train_acc 100.000000 (97.709143)\n",
            "[9,     1]  loss 0.028281 (0.028281) train_acc 100.000000 (100.000000)\n",
            "[9,   101]  loss 0.069478 (0.072620) train_acc 98.437500 (97.725866)\n",
            "[9,   201]  loss 0.152445 (0.074881) train_acc 93.750000 (97.597948)\n",
            "[9,   301]  loss 0.082708 (0.072442) train_acc 98.437500 (97.726329)\n",
            "[9,   401]  loss 0.109835 (0.072019) train_acc 96.875000 (97.778990)\n",
            "[9,   501]  loss 0.060277 (0.072631) train_acc 98.437500 (97.760729)\n",
            "[9,   601]  loss 0.065937 (0.070488) train_acc 98.437500 (97.826539)\n",
            "[9,   701]  loss 0.014126 (0.070274) train_acc 100.000000 (97.849055)\n",
            "[9,   801]  loss 0.032657 (0.067930) train_acc 98.437500 (97.912765)\n",
            "[9,   901]  loss 0.076376 (0.068204) train_acc 98.437500 (97.910308)\n",
            "[10,     1]  loss 0.003823 (0.003823) train_acc 100.000000 (100.000000)\n",
            "[10,   101]  loss 0.052691 (0.058593) train_acc 96.875000 (98.236386)\n",
            "[10,   201]  loss 0.066795 (0.066741) train_acc 98.437500 (97.947761)\n",
            "[10,   301]  loss 0.042395 (0.062642) train_acc 98.437500 (98.063746)\n",
            "[10,   401]  loss 0.033428 (0.061129) train_acc 98.437500 (98.082918)\n",
            "[10,   501]  loss 0.042198 (0.060756) train_acc 98.437500 (98.097555)\n",
            "[10,   601]  loss 0.022638 (0.061728) train_acc 100.000000 (98.052725)\n",
            "[10,   701]  loss 0.050612 (0.062882) train_acc 96.875000 (98.058577)\n",
            "[10,   801]  loss 0.025219 (0.062573) train_acc 100.000000 (98.064919)\n",
            "[10,   901]  loss 0.047493 (0.062641) train_acc 98.437500 (98.068119)\n",
            "[11,     1]  loss 0.028876 (0.028876) train_acc 100.000000 (100.000000)\n",
            "[11,   101]  loss 0.009008 (0.051548) train_acc 100.000000 (98.499381)\n",
            "[11,   201]  loss 0.075398 (0.053024) train_acc 96.875000 (98.398632)\n",
            "[11,   301]  loss 0.225170 (0.056926) train_acc 96.875000 (98.312915)\n",
            "[11,   401]  loss 0.025252 (0.059505) train_acc 100.000000 (98.207606)\n",
            "[11,   501]  loss 0.065445 (0.060140) train_acc 100.000000 (98.225424)\n",
            "[11,   601]  loss 0.046884 (0.060416) train_acc 98.437500 (98.182716)\n",
            "[11,   701]  loss 0.025708 (0.059385) train_acc 100.000000 (98.205688)\n",
            "[11,   801]  loss 0.025495 (0.059464) train_acc 100.000000 (98.195615)\n",
            "[11,   901]  loss 0.031658 (0.058235) train_acc 98.437500 (98.241537)\n",
            "[12,     1]  loss 0.030166 (0.030166) train_acc 100.000000 (100.000000)\n",
            "[12,   101]  loss 0.058315 (0.045895) train_acc 96.875000 (98.499381)\n",
            "[12,   201]  loss 0.045333 (0.053136) train_acc 98.437500 (98.313122)\n",
            "[12,   301]  loss 0.067237 (0.054301) train_acc 96.875000 (98.271387)\n",
            "[12,   401]  loss 0.047100 (0.055271) train_acc 96.875000 (98.273847)\n",
            "[12,   501]  loss 0.010331 (0.054966) train_acc 100.000000 (98.290918)\n",
            "[12,   601]  loss 0.040698 (0.054946) train_acc 98.437500 (98.304908)\n",
            "[12,   701]  loss 0.011931 (0.053334) train_acc 100.000000 (98.361715)\n",
            "[12,   801]  loss 0.047212 (0.053899) train_acc 98.437500 (98.349719)\n",
            "[12,   901]  loss 0.021073 (0.053742) train_acc 98.437500 (98.359462)\n",
            "[13,     1]  loss 0.015100 (0.015100) train_acc 100.000000 (100.000000)\n",
            "[13,   101]  loss 0.013431 (0.040558) train_acc 100.000000 (98.839728)\n",
            "[13,   201]  loss 0.009893 (0.042992) train_acc 100.000000 (98.756219)\n",
            "[13,   301]  loss 0.021011 (0.046404) train_acc 100.000000 (98.639950)\n",
            "[13,   401]  loss 0.059951 (0.049754) train_acc 98.437500 (98.566085)\n",
            "[13,   501]  loss 0.026911 (0.051534) train_acc 98.437500 (98.478044)\n",
            "[13,   601]  loss 0.025277 (0.051864) train_acc 100.000000 (98.445300)\n",
            "[13,   701]  loss 0.123020 (0.050886) train_acc 95.312500 (98.466476)\n",
            "[13,   801]  loss 0.012711 (0.050310) train_acc 100.000000 (98.476514)\n",
            "[13,   901]  loss 0.015683 (0.050309) train_acc 100.000000 (98.453108)\n",
            "[14,     1]  loss 0.015006 (0.015006) train_acc 100.000000 (100.000000)\n",
            "[14,   101]  loss 0.057308 (0.047587) train_acc 98.437500 (98.715965)\n",
            "[14,   201]  loss 0.044628 (0.048481) train_acc 98.437500 (98.670709)\n",
            "[14,   301]  loss 0.020349 (0.049961) train_acc 100.000000 (98.551703)\n",
            "[14,   401]  loss 0.046586 (0.050078) train_acc 98.437500 (98.515430)\n",
            "[14,   501]  loss 0.017401 (0.048794) train_acc 98.437500 (98.534182)\n",
            "[14,   601]  loss 0.020159 (0.048963) train_acc 100.000000 (98.531094)\n",
            "[14,   701]  loss 0.093015 (0.048650) train_acc 96.875000 (98.544490)\n",
            "[14,   801]  loss 0.092841 (0.047548) train_acc 96.875000 (98.572097)\n",
            "[14,   901]  loss 0.075724 (0.047074) train_acc 96.875000 (98.572766)\n",
            "[15,     1]  loss 0.029175 (0.029175) train_acc 100.000000 (100.000000)\n",
            "[15,   101]  loss 0.004721 (0.045490) train_acc 100.000000 (98.514851)\n",
            "[15,   201]  loss 0.040581 (0.042149) train_acc 98.437500 (98.686256)\n",
            "[15,   301]  loss 0.044805 (0.041544) train_acc 98.437500 (98.681478)\n",
            "[15,   401]  loss 0.089017 (0.040428) train_acc 98.437500 (98.753117)\n",
            "[15,   501]  loss 0.001939 (0.041203) train_acc 100.000000 (98.727545)\n",
            "[15,   601]  loss 0.031541 (0.042560) train_acc 98.437500 (98.679285)\n",
            "[15,   701]  loss 0.047192 (0.042918) train_acc 96.875000 (98.684914)\n",
            "[15,   801]  loss 0.011530 (0.043959) train_acc 100.000000 (98.642322)\n",
            "[15,   901]  loss 0.030178 (0.044269) train_acc 98.437500 (98.638665)\n",
            "[16,     1]  loss 0.006916 (0.006916) train_acc 100.000000 (100.000000)\n",
            "[16,   101]  loss 0.147809 (0.041792) train_acc 96.875000 (98.700495)\n",
            "[16,   201]  loss 0.033188 (0.041711) train_acc 98.437500 (98.655162)\n",
            "[16,   301]  loss 0.017665 (0.042114) train_acc 100.000000 (98.707434)\n",
            "[16,   401]  loss 0.020114 (0.042663) train_acc 100.000000 (98.679084)\n",
            "[16,   501]  loss 0.014894 (0.042825) train_acc 100.000000 (98.662051)\n",
            "[16,   601]  loss 0.009032 (0.041500) train_acc 100.000000 (98.702683)\n",
            "[16,   701]  loss 0.011151 (0.041410) train_acc 100.000000 (98.700517)\n",
            "[16,   801]  loss 0.063772 (0.041787) train_acc 98.437500 (98.700843)\n",
            "[16,   901]  loss 0.065937 (0.041865) train_acc 96.875000 (98.708033)\n",
            "[17,     1]  loss 0.110136 (0.110136) train_acc 98.437500 (98.437500)\n",
            "[17,   101]  loss 0.073887 (0.045636) train_acc 96.875000 (98.638614)\n",
            "[17,   201]  loss 0.002732 (0.043471) train_acc 100.000000 (98.662935)\n",
            "[17,   301]  loss 0.186425 (0.040393) train_acc 95.312500 (98.790490)\n",
            "[17,   401]  loss 0.038164 (0.040455) train_acc 98.437500 (98.795979)\n",
            "[17,   501]  loss 0.002354 (0.039810) train_acc 100.000000 (98.808633)\n",
            "[17,   601]  loss 0.070873 (0.040013) train_acc 96.875000 (98.809276)\n",
            "[17,   701]  loss 0.013070 (0.039967) train_acc 100.000000 (98.796362)\n",
            "[17,   801]  loss 0.010988 (0.039715) train_acc 100.000000 (98.802278)\n",
            "[17,   901]  loss 0.039281 (0.039474) train_acc 98.437500 (98.796476)\n",
            "[18,     1]  loss 0.030804 (0.030804) train_acc 98.437500 (98.437500)\n",
            "[18,   101]  loss 0.090327 (0.037855) train_acc 96.875000 (98.793317)\n",
            "[18,   201]  loss 0.009981 (0.037890) train_acc 100.000000 (98.865050)\n",
            "[18,   301]  loss 0.051971 (0.036907) train_acc 98.437500 (98.909884)\n",
            "[18,   401]  loss 0.009996 (0.037192) train_acc 100.000000 (98.877805)\n",
            "[18,   501]  loss 0.017703 (0.038047) train_acc 100.000000 (98.833583)\n",
            "[18,   601]  loss 0.037130 (0.038114) train_acc 98.437500 (98.809276)\n",
            "[18,   701]  loss 0.051285 (0.037896) train_acc 98.437500 (98.825339)\n",
            "[18,   801]  loss 0.006280 (0.037815) train_acc 100.000000 (98.831539)\n",
            "[18,   901]  loss 0.027157 (0.038016) train_acc 98.437500 (98.836362)\n",
            "[19,     1]  loss 0.011995 (0.011995) train_acc 100.000000 (100.000000)\n",
            "[19,   101]  loss 0.010111 (0.044050) train_acc 100.000000 (98.731436)\n",
            "[19,   201]  loss 0.104514 (0.041624) train_acc 95.312500 (98.787313)\n",
            "[19,   301]  loss 0.011888 (0.038384) train_acc 100.000000 (98.868355)\n",
            "[19,   401]  loss 0.093179 (0.036975) train_acc 96.875000 (98.897288)\n",
            "[19,   501]  loss 0.003231 (0.037241) train_acc 100.000000 (98.889721)\n",
            "[19,   601]  loss 0.035910 (0.036435) train_acc 98.437500 (98.918469)\n",
            "[19,   701]  loss 0.020352 (0.035921) train_acc 100.000000 (98.943474)\n",
            "[19,   801]  loss 0.022800 (0.036131) train_acc 98.437500 (98.932974)\n",
            "[19,   901]  loss 0.017342 (0.035927) train_acc 100.000000 (98.910932)\n",
            "[20,     1]  loss 0.020041 (0.020041) train_acc 100.000000 (100.000000)\n",
            "[20,   101]  loss 0.050493 (0.035328) train_acc 98.437500 (98.870668)\n",
            "[20,   201]  loss 0.014758 (0.036949) train_acc 100.000000 (98.771766)\n",
            "[20,   301]  loss 0.029627 (0.034523) train_acc 98.437500 (98.857973)\n",
            "[20,   401]  loss 0.006670 (0.034597) train_acc 100.000000 (98.873909)\n",
            "[20,   501]  loss 0.081533 (0.034947) train_acc 98.437500 (98.864770)\n",
            "[20,   601]  loss 0.032984 (0.035790) train_acc 98.437500 (98.866473)\n",
            "[20,   701]  loss 0.103274 (0.035447) train_acc 98.437500 (98.898894)\n",
            "[20,   801]  loss 0.004635 (0.034987) train_acc 100.000000 (98.911517)\n",
            "[20,   901]  loss 0.101511 (0.034879) train_acc 98.437500 (98.921337)\n",
            "Finished Training\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Baseline test accuracy of the unquantized FP32 model\n",
        "score = test(net, testloader, cuda=False)\n",
        "print('Accuracy of the network on the test images: {}% - FP32'.format(score))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "I01c5BSa08r0",
        "outputId": "60dec53b-a3cf-41a5-b0cf-d805c76fd52a"
      },
      "execution_count": 27,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Accuracy of the network on the test images: 98.65% - FP32\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Post-training quantization: calibrate a copy of the trained FP32 net, then convert to INT8"
      ],
      "metadata": {
        "id": "hsjMN7kU1FYl"
      },
      "execution_count": 28,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Build a quantizable copy of the trained network, load the FP32 weights,\n",
        "# and fuse adjacent modules (Net/load_model/fuse_modules are notebook helpers\n",
        "# defined earlier).\n",
        "qnet = Net(q=True)\n",
        "load_model(qnet, net)\n",
        "fuse_modules(qnet)"
      ],
      "metadata": {
        "id": "Be0Kyfgt1I73"
      },
      "execution_count": 29,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Sanity check: after fusion the model is still FP32, so accuracy should\n",
        "# match the unfused baseline.\n",
        "print_size_of_model(qnet)\n",
        "score = test(qnet, testloader, cuda=False)\n",
        "print('Accuracy of the fused network on the test images: {}% - FP32'.format(score))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "fBKACTCL1Owp",
        "outputId": "3e3419e8-d2d3-4a99-8fdb-5ae336158b70"
      },
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Size (MB): 0.178843\n",
            "Accuracy of the fused network on the test images: 98.65% - FP32\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Post-training static quantization with the default qconfig\n",
        "# (per-tensor MinMax observers).\n",
        "qnet.qconfig = torch.quantization.default_qconfig\n",
        "print(qnet.qconfig)\n",
        "# Insert observers that will record activation ranges during calibration\n",
        "torch.quantization.prepare(qnet, inplace=True)\n",
        "print('Post Training Quantization Prepare: Inserting Observers')\n",
        "print('\\n Conv1: After observer insertion \\n\\n', qnet.conv1)\n",
        "\n",
        "# Calibration pass: running the train set through the model lets the\n",
        "# observers see representative activations (the accuracy result is ignored)\n",
        "test(qnet, trainloader, cuda=False)\n",
        "print('Post Training Quantization: Calibration done')\n",
        "# Replace observed modules with their quantized INT8 counterparts\n",
        "torch.quantization.convert(qnet, inplace=True)\n",
        "print('Post Training Quantization: Convert done')\n",
        "print('\\n Conv1: After fusion and quantization \\n\\n', qnet.conv1)\n",
        "print(\"Size of model after quantization\")\n",
        "print_size_of_model(qnet)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "kOcxvHLg1SZd",
        "outputId": "6ac94316-d67f-4bd0-cefa-4e93c382d830"
      },
      "execution_count": 31,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "QConfig(activation=functools.partial(<class 'torch.ao.quantization.observer.MinMaxObserver'>, quant_min=0, quant_max=127){}, weight=functools.partial(<class 'torch.ao.quantization.observer.MinMaxObserver'>, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric){})\n",
            "Post Training Quantization Prepare: Inserting Observers\n",
            "\n",
            " Conv1: After observer insertion \n",
            "\n",
            " ConvReLU2d(\n",
            "  (0): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1), bias=False)\n",
            "  (1): ReLU()\n",
            "  (activation_post_process): MinMaxObserver(min_val=inf, max_val=-inf)\n",
            ")\n",
            "Post Training Quantization: Calibration done\n",
            "Post Training Quantization: Convert done\n",
            "\n",
            " Conv1: After fusion and quantization \n",
            "\n",
            " QuantizedConvReLU2d(1, 6, kernel_size=(5, 5), stride=(1, 1), scale=0.06902680546045303, zero_point=0, bias=False)\n",
            "Size of model after quantization\n",
            "Size (MB): 0.049714\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Accuracy of the INT8 model quantized with the default qconfig\n",
        "score = test(qnet, testloader, cuda=False)\n",
        "print('Accuracy of the fused and quantized network on the test images: {}% - INT8'.format(score))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "TIM5tF-L1aUi",
        "outputId": "b9cd13be-8c16-4cf4-ec92-9612f0dcf072"
      },
      "execution_count": 32,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Accuracy of the fused and quantized network on the test images: 98.58% - INT8\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "from torch.quantization.observer import MovingAverageMinMaxObserver\n",
        "\n",
        "# Repeat PTQ with a custom QConfig: moving-average observers for both\n",
        "# activations and weights instead of the plain MinMax defaults.\n",
        "qnet = Net(q=True)\n",
        "load_model(qnet, net)\n",
        "fuse_modules(qnet)\n",
        "\n",
        "qnet.qconfig = torch.quantization.QConfig(\n",
        "                                      activation=MovingAverageMinMaxObserver.with_args(reduce_range=True), \n",
        "                                      weight=MovingAverageMinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric))\n",
        "print(qnet.qconfig)\n",
        "torch.quantization.prepare(qnet, inplace=True)\n",
        "print('Post Training Quantization Prepare: Inserting Observers')\n",
        "print('\\n Conv1: After observer insertion \\n\\n', qnet.conv1)\n",
        "\n",
        "# Calibration pass over the training set (result ignored), then convert\n",
        "test(qnet, trainloader, cuda=False)\n",
        "print('Post Training Quantization: Calibration done')\n",
        "torch.quantization.convert(qnet, inplace=True)\n",
        "print('Post Training Quantization: Convert done')\n",
        "print('\\n Conv1: After fusion and quantization \\n\\n', qnet.conv1)\n",
        "print(\"Size of model after quantization\")\n",
        "print_size_of_model(qnet)\n",
        "score = test(qnet, testloader, cuda=False)\n",
        "print('Accuracy of the fused and quantized network on the test images: {}% - INT8'.format(score))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "J8MTj3wD1eeo",
        "outputId": "9c5121d1-750f-495c-e181-0acaa72de560"
      },
      "execution_count": 33,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "QConfig(activation=functools.partial(<class 'torch.ao.quantization.observer.MovingAverageMinMaxObserver'>, reduce_range=True){}, weight=functools.partial(<class 'torch.ao.quantization.observer.MovingAverageMinMaxObserver'>, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric){})\n",
            "Post Training Quantization Prepare: Inserting Observers\n",
            "\n",
            " Conv1: After observer insertion \n",
            "\n",
            " ConvReLU2d(\n",
            "  (0): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1), bias=False)\n",
            "  (1): ReLU()\n",
            "  (activation_post_process): MovingAverageMinMaxObserver(min_val=inf, max_val=-inf)\n",
            ")\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/usr/local/lib/python3.7/dist-packages/torch/ao/quantization/observer.py:178: UserWarning: Please use quant_min and quant_max to specify the range for observers.                     reduce_range will be deprecated in a future release of PyTorch.\n",
            "  reduce_range will be deprecated in a future release of PyTorch.\"\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Post Training Quantization: Calibration done\n",
            "Post Training Quantization: Convert done\n",
            "\n",
            " Conv1: After fusion and quantization \n",
            "\n",
            " QuantizedConvReLU2d(1, 6, kernel_size=(5, 5), stride=(1, 1), scale=0.06884118169546127, zero_point=0, bias=False)\n",
            "Size of model after quantization\n",
            "Size (MB): 0.049714\n",
            "Accuracy of the fused and quantized network on the test images: 98.6% - INT8\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Fresh fused copy of the trained network for the next quantization recipe\n",
        "qnet = Net(q=True)\n",
        "load_model(qnet, net)\n",
        "fuse_modules(qnet)"
      ],
      "metadata": {
        "id": "Zr9VvGYy1jBZ"
      },
      "execution_count": 34,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# PTQ with the x86 'fbgemm' backend defaults: histogram observer for\n",
        "# activations, per-channel symmetric observer for weights.\n",
        "qnet.qconfig = torch.quantization.get_default_qconfig('fbgemm')\n",
        "print(qnet.qconfig)\n",
        "\n",
        "torch.quantization.prepare(qnet, inplace=True)\n",
        "# Calibrate on the training set (result ignored), then convert to INT8\n",
        "test(qnet, trainloader, cuda=False)\n",
        "torch.quantization.convert(qnet, inplace=True)\n",
        "print(\"Size of model after quantization\")\n",
        "print_size_of_model(qnet)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "5UmC5jd71ozt",
        "outputId": "107b7dc5-d390-4137-e5f7-ea6280f77eba"
      },
      "execution_count": 35,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "QConfig(activation=functools.partial(<class 'torch.ao.quantization.observer.HistogramObserver'>, reduce_range=True){}, weight=functools.partial(<class 'torch.ao.quantization.observer.PerChannelMinMaxObserver'>, dtype=torch.qint8, qscheme=torch.per_channel_symmetric){})\n",
            "Size of model after quantization\n",
            "Size (MB): 0.055572\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Accuracy of the INT8 model quantized with the fbgemm defaults\n",
        "score = test(qnet, testloader, cuda=False)\n",
        "print('Accuracy of the fused and quantized network on the test images: {}% - INT8'.format(score))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "4iEoLMXO1rc3",
        "outputId": "c189790c-9e00-4479-966e-f1c412cce863"
      },
      "execution_count": 36,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Accuracy of the fused and quantized network on the test images: 98.58% - INT8\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Quantization-aware training setup: fresh quantizable net, fuse modules,\n",
        "# attach the fbgemm QAT qconfig, and insert fake-quantize/observer modules.\n",
        "# (Removed dead no-op `qnet=qnet`, apparently a leftover from a deleted\n",
        "# `.cuda()` call.)\n",
        "qnet = Net(q=True)\n",
        "fuse_modules(qnet)\n",
        "qnet.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n",
        "torch.quantization.prepare_qat(qnet, inplace=True)\n",
        "print('\\n Conv1: After fusion and quantization \\n\\n', qnet.conv1)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "16dn3gDe1vDC",
        "outputId": "28d2f3c8-4df2-4bee-b8dd-7904661ff677"
      },
      "execution_count": 37,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\n",
            " Conv1: After fusion and quantization \n",
            "\n",
            " ConvReLU2d(\n",
            "  1, 6, kernel_size=(5, 5), stride=(1, 1), bias=False\n",
            "  (weight_fake_quant): FusedMovingAvgObsFakeQuantize(\n",
            "    fake_quant_enabled=tensor([1]), observer_enabled=tensor([1]), scale=tensor([1.]), zero_point=tensor([0], dtype=torch.int32), dtype=torch.qint8, quant_min=-128, quant_max=127, qscheme=torch.per_channel_symmetric, reduce_range=False\n",
            "    (activation_post_process): MovingAveragePerChannelMinMaxObserver(min_val=tensor([]), max_val=tensor([]))\n",
            "  )\n",
            "  (activation_post_process): FusedMovingAvgObsFakeQuantize(\n",
            "    fake_quant_enabled=tensor([1]), observer_enabled=tensor([1]), scale=tensor([1.]), zero_point=tensor([0], dtype=torch.int32), dtype=torch.quint8, quant_min=0, quant_max=127, qscheme=torch.per_tensor_affine, reduce_range=True\n",
            "    (activation_post_process): MovingAverageMinMaxObserver(min_val=inf, max_val=-inf)\n",
            "  )\n",
            ")\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Quantization-aware training of the fake-quantized network.\n",
        "# NOTE(review): `q=True` is not passed, so train()'s observer-freezing\n",
        "# branch (epoch >= 3) never runs here -- confirm whether that is intended.\n",
        "train(qnet, trainloader, cuda=False)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "ZHUIYt1V1zwy",
        "outputId": "e2cba9a7-d181-4e37-8225-2d3103e789a1"
      },
      "execution_count": 38,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "[1,     1]  loss 2.304912 (2.304912) train_acc 6.250000 (6.250000)\n",
            "[1,   101]  loss 2.302224 (2.298668) train_acc 14.062500 (12.376238)\n",
            "[1,   201]  loss 2.286923 (2.295222) train_acc 29.687500 (16.106965)\n",
            "[1,   301]  loss 2.270232 (2.290615) train_acc 45.312500 (22.809385)\n",
            "[1,   401]  loss 2.254775 (2.284239) train_acc 57.812500 (28.541926)\n",
            "[1,   501]  loss 2.222739 (2.275096) train_acc 64.062500 (33.751248)\n",
            "[1,   601]  loss 2.156912 (2.261394) train_acc 68.750000 (38.334547)\n",
            "[1,   701]  loss 2.057673 (2.240037) train_acc 70.312500 (42.800464)\n",
            "[1,   801]  loss 1.874877 (2.207100) train_acc 79.687500 (46.553137)\n",
            "[1,   901]  loss 1.593751 (2.156483) train_acc 70.312500 (49.732936)\n",
            "[2,     1]  loss 1.461501 (1.461501) train_acc 81.250000 (81.250000)\n",
            "[2,   101]  loss 1.003205 (1.255670) train_acc 82.812500 (78.511757)\n",
            "[2,   201]  loss 0.705667 (1.058748) train_acc 87.500000 (80.185012)\n",
            "[2,   301]  loss 0.581954 (0.912321) train_acc 81.250000 (81.499169)\n",
            "[2,   401]  loss 0.417181 (0.808620) train_acc 89.062500 (82.465711)\n",
            "[2,   501]  loss 0.395143 (0.729751) train_acc 87.500000 (83.607784)\n",
            "[2,   601]  loss 0.326027 (0.671504) train_acc 89.062500 (84.434796)\n",
            "[2,   701]  loss 0.326395 (0.622526) train_acc 95.312500 (85.266583)\n",
            "[2,   801]  loss 0.376571 (0.585748) train_acc 85.937500 (85.917993)\n",
            "[2,   901]  loss 0.304416 (0.552037) train_acc 92.187500 (86.502844)\n",
            "[3,     1]  loss 0.288517 (0.288517) train_acc 90.625000 (90.625000)\n",
            "[3,   101]  loss 0.242428 (0.278101) train_acc 87.500000 (91.506807)\n",
            "[3,   201]  loss 0.232365 (0.269756) train_acc 90.625000 (91.837687)\n",
            "[3,   301]  loss 0.171139 (0.272384) train_acc 92.187500 (91.689161)\n",
            "[3,   401]  loss 0.352211 (0.267843) train_acc 84.375000 (91.790056)\n",
            "[3,   501]  loss 0.387578 (0.261007) train_acc 89.062500 (91.950474)\n",
            "[3,   601]  loss 0.151508 (0.256226) train_acc 95.312500 (92.112105)\n",
            "[3,   701]  loss 0.217611 (0.252949) train_acc 92.187500 (92.145150)\n",
            "[3,   801]  loss 0.159826 (0.250876) train_acc 95.312500 (92.199204)\n",
            "[3,   901]  loss 0.217516 (0.247917) train_acc 93.750000 (92.291551)\n",
            "[4,     1]  loss 0.362092 (0.362092) train_acc 90.625000 (90.625000)\n",
            "[4,   101]  loss 0.168696 (0.179643) train_acc 95.312500 (94.461634)\n",
            "[4,   201]  loss 0.268132 (0.185740) train_acc 92.187500 (94.247512)\n",
            "[4,   301]  loss 0.083073 (0.186154) train_acc 98.437500 (94.134136)\n",
            "[4,   401]  loss 0.094814 (0.185414) train_acc 98.437500 (94.221478)\n",
            "[4,   501]  loss 0.265425 (0.183120) train_acc 89.062500 (94.292665)\n",
            "[4,   601]  loss 0.200772 (0.182831) train_acc 92.187500 (94.288166)\n",
            "[4,   701]  loss 0.061511 (0.179949) train_acc 98.437500 (94.371879)\n",
            "[4,   801]  loss 0.083004 (0.177093) train_acc 98.437500 (94.483458)\n",
            "[4,   901]  loss 0.188908 (0.173438) train_acc 93.750000 (94.598016)\n",
            "[5,     1]  loss 0.178856 (0.178856) train_acc 95.312500 (95.312500)\n",
            "[5,   101]  loss 0.104879 (0.140605) train_acc 95.312500 (95.575495)\n",
            "[5,   201]  loss 0.214131 (0.141314) train_acc 95.312500 (95.569030)\n",
            "[5,   301]  loss 0.185779 (0.139616) train_acc 90.625000 (95.623962)\n",
            "[5,   401]  loss 0.108482 (0.139402) train_acc 96.875000 (95.635910)\n",
            "[5,   501]  loss 0.144845 (0.139020) train_acc 96.875000 (95.677395)\n",
            "[5,   601]  loss 0.157321 (0.139302) train_acc 96.875000 (95.637479)\n",
            "[5,   701]  loss 0.393124 (0.137990) train_acc 93.750000 (95.707026)\n",
            "[5,   801]  loss 0.121730 (0.135771) train_acc 95.312500 (95.784566)\n",
            "[5,   901]  loss 0.045606 (0.135117) train_acc 98.437500 (95.825819)\n",
            "[6,     1]  loss 0.102073 (0.102073) train_acc 95.312500 (95.312500)\n",
            "[6,   101]  loss 0.033389 (0.124927) train_acc 100.000000 (96.008663)\n",
            "[6,   201]  loss 0.089275 (0.122538) train_acc 98.437500 (96.136505)\n",
            "[6,   301]  loss 0.242794 (0.121705) train_acc 92.187500 (96.122301)\n",
            "[6,   401]  loss 0.268507 (0.119325) train_acc 93.750000 (96.181421)\n",
            "[6,   501]  loss 0.155881 (0.117984) train_acc 93.750000 (96.226297)\n",
            "[6,   601]  loss 0.132573 (0.117445) train_acc 95.312500 (96.295237)\n",
            "[6,   701]  loss 0.222789 (0.117787) train_acc 92.187500 (96.286555)\n",
            "[6,   801]  loss 0.158997 (0.116819) train_acc 98.437500 (96.326857)\n",
            "[6,   901]  loss 0.157587 (0.115194) train_acc 93.750000 (96.375555)\n",
            "[7,     1]  loss 0.171446 (0.171446) train_acc 95.312500 (95.312500)\n",
            "[7,   101]  loss 0.267069 (0.106350) train_acc 93.750000 (96.596535)\n",
            "[7,   201]  loss 0.040982 (0.101666) train_acc 98.437500 (96.742848)\n",
            "[7,   301]  loss 0.033679 (0.101315) train_acc 100.000000 (96.771179)\n",
            "[7,   401]  loss 0.170485 (0.102408) train_acc 95.312500 (96.765898)\n",
            "[7,   501]  loss 0.055346 (0.103216) train_acc 98.437500 (96.750250)\n",
            "[7,   601]  loss 0.090334 (0.104868) train_acc 96.875000 (96.721610)\n",
            "[7,   701]  loss 0.072589 (0.102832) train_acc 96.875000 (96.774697)\n",
            "[7,   801]  loss 0.018816 (0.100177) train_acc 100.000000 (96.851592)\n",
            "[7,   901]  loss 0.038769 (0.101232) train_acc 98.437500 (96.836848)\n",
            "[8,     1]  loss 0.031027 (0.031027) train_acc 100.000000 (100.000000)\n",
            "[8,   101]  loss 0.117628 (0.089785) train_acc 93.750000 (97.184406)\n",
            "[8,   201]  loss 0.112968 (0.089153) train_acc 95.312500 (97.193719)\n",
            "[8,   301]  loss 0.067672 (0.091724) train_acc 98.437500 (97.129360)\n",
            "[8,   401]  loss 0.060425 (0.091556) train_acc 98.437500 (97.175031)\n",
            "[8,   501]  loss 0.049968 (0.092553) train_acc 98.437500 (97.168164)\n",
            "[8,   601]  loss 0.035431 (0.090735) train_acc 98.437500 (97.202579)\n",
            "[8,   701]  loss 0.275594 (0.091130) train_acc 93.750000 (97.162536)\n",
            "[8,   801]  loss 0.157625 (0.089993) train_acc 96.875000 (97.200765)\n",
            "[8,   901]  loss 0.148627 (0.089424) train_acc 95.312500 (97.207963)\n",
            "[9,     1]  loss 0.052153 (0.052153) train_acc 98.437500 (98.437500)\n",
            "[9,   101]  loss 0.051693 (0.088331) train_acc 98.437500 (97.292698)\n",
            "[9,   201]  loss 0.045907 (0.085576) train_acc 98.437500 (97.302550)\n",
            "[9,   301]  loss 0.091532 (0.086140) train_acc 96.875000 (97.290282)\n",
            "[9,   401]  loss 0.102736 (0.082717) train_acc 96.875000 (97.397132)\n",
            "[9,   501]  loss 0.033245 (0.082530) train_acc 100.000000 (97.436377)\n",
            "[9,   601]  loss 0.026625 (0.084123) train_acc 100.000000 (97.415765)\n",
            "[9,   701]  loss 0.036073 (0.084038) train_acc 98.437500 (97.409950)\n",
            "[9,   801]  loss 0.204828 (0.083140) train_acc 93.750000 (97.432896)\n",
            "[9,   901]  loss 0.096338 (0.082500) train_acc 96.875000 (97.412597)\n",
            "[10,     1]  loss 0.015489 (0.015489) train_acc 100.000000 (100.000000)\n",
            "[10,   101]  loss 0.090775 (0.073915) train_acc 96.875000 (97.663985)\n",
            "[10,   201]  loss 0.105013 (0.075900) train_acc 96.875000 (97.636816)\n",
            "[10,   301]  loss 0.147889 (0.073330) train_acc 92.187500 (97.658846)\n",
            "[10,   401]  loss 0.063855 (0.073080) train_acc 98.437500 (97.697163)\n",
            "[10,   501]  loss 0.096215 (0.073660) train_acc 93.750000 (97.692116)\n",
            "[10,   601]  loss 0.057940 (0.073806) train_acc 96.875000 (97.725146)\n",
            "[10,   701]  loss 0.144424 (0.073537) train_acc 92.187500 (97.739836)\n",
            "[10,   801]  loss 0.071175 (0.074327) train_acc 96.875000 (97.711845)\n",
            "[10,   901]  loss 0.094579 (0.074751) train_acc 98.437500 (97.712611)\n",
            "[11,     1]  loss 0.072733 (0.072733) train_acc 96.875000 (96.875000)\n",
            "[11,   101]  loss 0.063188 (0.064717) train_acc 96.875000 (97.942450)\n",
            "[11,   201]  loss 0.037046 (0.069177) train_acc 98.437500 (97.745647)\n",
            "[11,   301]  loss 0.056117 (0.071340) train_acc 98.437500 (97.726329)\n",
            "[11,   401]  loss 0.087080 (0.069252) train_acc 98.437500 (97.794576)\n",
            "[11,   501]  loss 0.047506 (0.069916) train_acc 98.437500 (97.773204)\n",
            "[11,   601]  loss 0.029867 (0.070493) train_acc 98.437500 (97.758943)\n",
            "[11,   701]  loss 0.020704 (0.069416) train_acc 100.000000 (97.777728)\n",
            "[11,   801]  loss 0.082215 (0.069413) train_acc 96.875000 (97.795724)\n",
            "[11,   901]  loss 0.041420 (0.068739) train_acc 98.437500 (97.809725)\n",
            "[12,     1]  loss 0.020738 (0.020738) train_acc 100.000000 (100.000000)\n",
            "[12,   101]  loss 0.132520 (0.066050) train_acc 96.875000 (98.035272)\n",
            "[12,   201]  loss 0.020328 (0.067793) train_acc 98.437500 (97.916667)\n",
            "[12,   301]  loss 0.072360 (0.066677) train_acc 96.875000 (97.959925)\n",
            "[12,   401]  loss 0.203802 (0.066156) train_acc 95.312500 (97.930954)\n",
            "[12,   501]  loss 0.012309 (0.065765) train_acc 100.000000 (97.966567)\n",
            "[12,   601]  loss 0.045110 (0.064721) train_acc 96.875000 (97.995528)\n",
            "[12,   701]  loss 0.287520 (0.064756) train_acc 96.875000 (98.013998)\n",
            "[12,   801]  loss 0.010458 (0.064823) train_acc 100.000000 (98.016152)\n",
            "[12,   901]  loss 0.026390 (0.063655) train_acc 100.000000 (98.033435)\n",
            "[13,     1]  loss 0.039520 (0.039520) train_acc 98.437500 (98.437500)\n",
            "[13,   101]  loss 0.033688 (0.061927) train_acc 100.000000 (98.081683)\n",
            "[13,   201]  loss 0.013745 (0.060907) train_acc 100.000000 (98.079913)\n",
            "[13,   301]  loss 0.066320 (0.056833) train_acc 98.437500 (98.188331)\n",
            "[13,   401]  loss 0.010429 (0.058644) train_acc 100.000000 (98.160848)\n",
            "[13,   501]  loss 0.091830 (0.060544) train_acc 96.875000 (98.060130)\n",
            "[13,   601]  loss 0.013941 (0.060422) train_acc 100.000000 (98.076123)\n",
            "[13,   701]  loss 0.075156 (0.060278) train_acc 98.437500 (98.087553)\n",
            "[13,   801]  loss 0.132532 (0.060734) train_acc 93.750000 (98.066870)\n",
            "[13,   901]  loss 0.016626 (0.060998) train_acc 100.000000 (98.061182)\n",
            "[14,     1]  loss 0.032147 (0.032147) train_acc 98.437500 (98.437500)\n",
            "[14,   101]  loss 0.073677 (0.047851) train_acc 96.875000 (98.592203)\n",
            "[14,   201]  loss 0.022642 (0.056259) train_acc 100.000000 (98.282027)\n",
            "[14,   301]  loss 0.072713 (0.060426) train_acc 96.875000 (98.162375)\n",
            "[14,   401]  loss 0.103081 (0.058826) train_acc 95.312500 (98.168641)\n",
            "[14,   501]  loss 0.085889 (0.058218) train_acc 98.437500 (98.181761)\n",
            "[14,   601]  loss 0.167497 (0.057446) train_acc 95.312500 (98.200915)\n",
            "[14,   701]  loss 0.013095 (0.056013) train_acc 100.000000 (98.270328)\n",
            "[14,   801]  loss 0.021746 (0.056008) train_acc 100.000000 (98.287297)\n",
            "[14,   901]  loss 0.145443 (0.056637) train_acc 96.875000 (98.269284)\n",
            "[15,     1]  loss 0.104913 (0.104913) train_acc 96.875000 (96.875000)\n",
            "[15,   101]  loss 0.009781 (0.049831) train_acc 100.000000 (98.406559)\n",
            "[15,   201]  loss 0.005081 (0.047918) train_acc 100.000000 (98.460821)\n",
            "[15,   301]  loss 0.024571 (0.050655) train_acc 100.000000 (98.421927)\n",
            "[15,   401]  loss 0.083018 (0.052274) train_acc 96.875000 (98.394638)\n",
            "[15,   501]  loss 0.137679 (0.053624) train_acc 96.875000 (98.334581)\n",
            "[15,   601]  loss 0.067262 (0.053238) train_acc 98.437500 (98.328307)\n",
            "[15,   701]  loss 0.025749 (0.053818) train_acc 100.000000 (98.334968)\n",
            "[15,   801]  loss 0.100811 (0.054027) train_acc 96.875000 (98.341916)\n",
            "[15,   901]  loss 0.040471 (0.053821) train_acc 98.437500 (98.336917)\n",
            "[16,     1]  loss 0.024462 (0.024462) train_acc 100.000000 (100.000000)\n",
            "[16,   101]  loss 0.047599 (0.050869) train_acc 96.875000 (98.452970)\n",
            "[16,   201]  loss 0.071995 (0.053291) train_acc 96.875000 (98.289801)\n",
            "[16,   301]  loss 0.033915 (0.054316) train_acc 100.000000 (98.318106)\n",
            "[16,   401]  loss 0.066012 (0.051981) train_acc 96.875000 (98.382949)\n",
            "[16,   501]  loss 0.015917 (0.050907) train_acc 100.000000 (98.434381)\n",
            "[16,   601]  loss 0.015357 (0.050693) train_acc 100.000000 (98.442700)\n",
            "[16,   701]  loss 0.057103 (0.050954) train_acc 96.875000 (98.448645)\n",
            "[16,   801]  loss 0.068351 (0.049712) train_acc 98.437500 (98.462859)\n",
            "[16,   901]  loss 0.013489 (0.050565) train_acc 100.000000 (98.449639)\n",
            "[17,     1]  loss 0.054813 (0.054813) train_acc 98.437500 (98.437500)\n",
            "[17,   101]  loss 0.017434 (0.048760) train_acc 100.000000 (98.499381)\n",
            "[17,   201]  loss 0.061202 (0.050014) train_acc 96.875000 (98.445274)\n",
            "[17,   301]  loss 0.041155 (0.047433) train_acc 98.437500 (98.504983)\n",
            "[17,   401]  loss 0.015158 (0.047482) train_acc 100.000000 (98.476465)\n",
            "[17,   501]  loss 0.050409 (0.046984) train_acc 98.437500 (98.540419)\n",
            "[17,   601]  loss 0.086951 (0.046577) train_acc 96.875000 (98.518095)\n",
            "[17,   701]  loss 0.042948 (0.047214) train_acc 98.437500 (98.499911)\n",
            "[17,   801]  loss 0.030807 (0.047646) train_acc 100.000000 (98.509675)\n",
            "[17,   901]  loss 0.073262 (0.047683) train_acc 96.875000 (98.506867)\n",
            "[18,     1]  loss 0.057370 (0.057370) train_acc 96.875000 (96.875000)\n",
            "[18,   101]  loss 0.059751 (0.039456) train_acc 96.875000 (98.638614)\n",
            "[18,   201]  loss 0.142978 (0.041439) train_acc 95.312500 (98.561878)\n",
            "[18,   301]  loss 0.053892 (0.042333) train_acc 96.875000 (98.619186)\n",
            "[18,   401]  loss 0.044799 (0.043058) train_acc 96.875000 (98.612843)\n",
            "[18,   501]  loss 0.014367 (0.044388) train_acc 100.000000 (98.593438)\n",
            "[18,   601]  loss 0.095131 (0.044725) train_acc 96.875000 (98.564892)\n",
            "[18,   701]  loss 0.045931 (0.045944) train_acc 96.875000 (98.542261)\n",
            "[18,   801]  loss 0.006968 (0.045631) train_acc 100.000000 (98.581851)\n",
            "[18,   901]  loss 0.087518 (0.045639) train_acc 96.875000 (98.577969)\n",
            "[19,     1]  loss 0.048803 (0.048803) train_acc 96.875000 (96.875000)\n",
            "[19,   101]  loss 0.122477 (0.039654) train_acc 95.312500 (98.808787)\n",
            "[19,   201]  loss 0.113517 (0.038900) train_acc 98.437500 (98.880597)\n",
            "[19,   301]  loss 0.026974 (0.039785) train_acc 98.437500 (98.795681)\n",
            "[19,   401]  loss 0.020117 (0.042320) train_acc 98.437500 (98.698566)\n",
            "[19,   501]  loss 0.002163 (0.041556) train_acc 100.000000 (98.705714)\n",
            "[19,   601]  loss 0.083915 (0.042568) train_acc 98.437500 (98.681884)\n",
            "[19,   701]  loss 0.111470 (0.043150) train_acc 96.875000 (98.662625)\n",
            "[19,   801]  loss 0.047452 (0.043614) train_acc 98.437500 (98.646223)\n",
            "[19,   901]  loss 0.019825 (0.043696) train_acc 100.000000 (98.645602)\n",
            "[20,     1]  loss 0.006836 (0.006836) train_acc 100.000000 (100.000000)\n",
            "[20,   101]  loss 0.045754 (0.049939) train_acc 98.437500 (98.468441)\n",
            "[20,   201]  loss 0.070759 (0.044362) train_acc 98.437500 (98.600746)\n",
            "[20,   301]  loss 0.005832 (0.041710) train_acc 100.000000 (98.681478)\n",
            "[20,   401]  loss 0.018529 (0.040720) train_acc 100.000000 (98.729738)\n",
            "[20,   501]  loss 0.024784 (0.040941) train_acc 100.000000 (98.715070)\n",
            "[20,   601]  loss 0.113816 (0.042074) train_acc 93.750000 (98.705283)\n",
            "[20,   701]  loss 0.071797 (0.042588) train_acc 98.437500 (98.711662)\n",
            "[20,   801]  loss 0.035786 (0.042146) train_acc 98.437500 (98.724251)\n",
            "[20,   901]  loss 0.046464 (0.042049) train_acc 98.437500 (98.711501)\n",
            "Finished Training\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "qnet = qnet.cpu()\n",
        "torch.quantization.convert(qnet, inplace=True)\n",
        "print(\"Size of model after quantization\")\n",
        "print_size_of_model(qnet)\n",
        "\n",
        "score = test(qnet, testloader, cuda=False)\n",
        "print('Accuracy of the fused and quantized network (trained quantized) on the test images: {}% - INT8'.format(score))\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "luHr3a6t14gw",
        "outputId": "37899473-ed35-444c-a88d-30d640b4b221"
      },
      "execution_count": 39,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Size of model after quantization\n",
            "Size (MB): 0.055572\n",
            "Accuracy of the fused and quantized network (trained quantized) on the test images: 98.69% - INT8\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "qnet = Net(q=True)\n",
        "fuse_modules(qnet)\n",
        "qnet.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')\n",
        "torch.quantization.prepare_qat(qnet, inplace=True)\n",
        "qnet = qnet\n",
        "train(qnet, trainloader, cuda=False, q=True)\n",
        "qnet = qnet.cpu()\n",
        "torch.quantization.convert(qnet, inplace=True)\n",
        "print(\"Size of model after quantization\")\n",
        "print_size_of_model(qnet)\n",
        "\n",
        "score = test(qnet, testloader, cuda=False)\n",
        "print('Accuracy of the fused and quantized network (trained quantized) on the test images: {}% - INT8'.format(score))"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "CUp5tihq19pF",
        "outputId": "8199bb59-28a5-4fec-8093-100ced1f3707"
      },
      "execution_count": 40,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "[1,     1]  loss 2.302919 (2.302919) train_acc 9.375000 (9.375000)\n",
            "[1,   101]  loss 2.298957 (2.301176) train_acc 7.812500 (9.622525)\n",
            "[1,   201]  loss 2.288294 (2.298530) train_acc 26.562500 (11.333955)\n",
            "[1,   301]  loss 2.279651 (2.295540) train_acc 32.812500 (14.716570)\n",
            "[1,   401]  loss 2.281182 (2.291908) train_acc 32.812500 (18.512313)\n",
            "[1,   501]  loss 2.263526 (2.287215) train_acc 37.500000 (22.935379)\n",
            "[1,   601]  loss 2.245340 (2.280857) train_acc 37.500000 (26.812084)\n",
            "[1,   701]  loss 2.195398 (2.272054) train_acc 46.875000 (29.986180)\n",
            "[1,   801]  loss 2.135129 (2.259340) train_acc 64.062500 (33.222144)\n",
            "[1,   901]  loss 2.003094 (2.239379) train_acc 64.062500 (35.966981)\n",
            "[2,     1]  loss 1.919566 (1.919566) train_acc 59.375000 (59.375000)\n",
            "[2,   101]  loss 1.685679 (1.827426) train_acc 70.312500 (64.217203)\n",
            "[2,   201]  loss 1.360839 (1.675866) train_acc 67.187500 (66.309080)\n",
            "[2,   301]  loss 0.977317 (1.507209) train_acc 78.125000 (68.568314)\n",
            "[2,   401]  loss 0.727046 (1.354546) train_acc 79.687500 (70.293017)\n",
            "[2,   501]  loss 0.649970 (1.228790) train_acc 82.812500 (71.893713)\n",
            "[2,   601]  loss 0.412412 (1.126242) train_acc 87.500000 (73.333507)\n",
            "[2,   701]  loss 0.531338 (1.041010) train_acc 85.937500 (74.875178)\n",
            "[2,   801]  loss 0.560508 (0.968906) train_acc 81.250000 (76.211376)\n",
            "[2,   901]  loss 0.381834 (0.907863) train_acc 85.937500 (77.410516)\n",
            "[3,     1]  loss 0.517291 (0.517291) train_acc 82.812500 (82.812500)\n",
            "[3,   101]  loss 0.321957 (0.371826) train_acc 92.187500 (88.985149)\n",
            "[3,   201]  loss 0.420531 (0.352606) train_acc 89.062500 (89.388993)\n",
            "[3,   301]  loss 0.246595 (0.337235) train_acc 92.187500 (89.950166)\n",
            "[3,   401]  loss 0.226431 (0.323071) train_acc 92.187500 (90.328865)\n",
            "[3,   501]  loss 0.151703 (0.313079) train_acc 96.875000 (90.593812)\n",
            "[3,   601]  loss 0.258228 (0.301104) train_acc 95.312500 (90.939580)\n",
            "[3,   701]  loss 0.219036 (0.290810) train_acc 95.312500 (91.220132)\n",
            "[3,   801]  loss 0.292119 (0.281709) train_acc 87.500000 (91.535971)\n",
            "[3,   901]  loss 0.199081 (0.271815) train_acc 93.750000 (91.849334)\n",
            "[4,     1]  loss 0.177415 (0.177415) train_acc 92.187500 (92.187500)\n",
            "[4,   101]  loss 0.181026 (0.189110) train_acc 92.187500 (93.935644)\n",
            "[4,   201]  loss 0.225432 (0.189963) train_acc 92.187500 (93.921020)\n",
            "[4,   301]  loss 0.251736 (0.188838) train_acc 93.750000 (94.102990)\n",
            "[4,   401]  loss 0.093129 (0.183558) train_acc 96.875000 (94.365648)\n",
            "[4,   501]  loss 0.273615 (0.181126) train_acc 92.187500 (94.433009)\n",
            "[4,   601]  loss 0.187969 (0.179798) train_acc 92.187500 (94.470154)\n",
            "[4,   701]  loss 0.159697 (0.176823) train_acc 96.875000 (94.585859)\n",
            "[4,   801]  loss 0.106929 (0.173782) train_acc 98.437500 (94.660971)\n",
            "[4,   901]  loss 0.131832 (0.169677) train_acc 96.875000 (94.799181)\n",
            "[5,     1]  loss 0.258544 (0.258544) train_acc 92.187500 (92.187500)\n",
            "[5,   101]  loss 0.114397 (0.139824) train_acc 93.750000 (95.915842)\n",
            "[5,   201]  loss 0.083801 (0.141997) train_acc 96.875000 (95.802239)\n",
            "[5,   301]  loss 0.109489 (0.142474) train_acc 98.437500 (95.748547)\n",
            "[5,   401]  loss 0.135368 (0.141351) train_acc 93.750000 (95.799564)\n",
            "[5,   501]  loss 0.029210 (0.139675) train_acc 100.000000 (95.848927)\n",
            "[5,   601]  loss 0.099611 (0.138640) train_acc 95.312500 (95.879264)\n",
            "[5,   701]  loss 0.104885 (0.137296) train_acc 96.875000 (95.943295)\n",
            "[5,   801]  loss 0.123801 (0.135790) train_acc 96.875000 (95.967931)\n",
            "[5,   901]  loss 0.245181 (0.134164) train_acc 93.750000 (95.992300)\n",
            "[6,     1]  loss 0.072366 (0.072366) train_acc 96.875000 (96.875000)\n",
            "[6,   101]  loss 0.059081 (0.126472) train_acc 96.875000 (96.256188)\n",
            "[6,   201]  loss 0.270916 (0.119472) train_acc 93.750000 (96.462998)\n",
            "[6,   301]  loss 0.098573 (0.121401) train_acc 96.875000 (96.418189)\n",
            "[6,   401]  loss 0.075224 (0.118136) train_acc 98.437500 (96.481453)\n",
            "[6,   501]  loss 0.135801 (0.116378) train_acc 93.750000 (96.482036)\n",
            "[6,   601]  loss 0.160378 (0.115904) train_acc 93.750000 (96.503224)\n",
            "[6,   701]  loss 0.119351 (0.115417) train_acc 95.312500 (96.522825)\n",
            "[6,   801]  loss 0.097917 (0.114482) train_acc 96.875000 (96.572644)\n",
            "[6,   901]  loss 0.140018 (0.113354) train_acc 95.312500 (96.602733)\n",
            "[7,     1]  loss 0.094187 (0.094187) train_acc 96.875000 (96.875000)\n",
            "[7,   101]  loss 0.064431 (0.101450) train_acc 98.437500 (97.122525)\n",
            "[7,   201]  loss 0.112177 (0.099699) train_acc 96.875000 (97.030473)\n",
            "[7,   301]  loss 0.135499 (0.101849) train_acc 98.437500 (97.020349)\n",
            "[7,   401]  loss 0.094675 (0.101648) train_acc 98.437500 (96.980206)\n",
            "[7,   501]  loss 0.032729 (0.101480) train_acc 100.000000 (96.952969)\n",
            "[7,   601]  loss 0.083729 (0.101968) train_acc 98.437500 (96.947795)\n",
            "[7,   701]  loss 0.147598 (0.101988) train_acc 96.875000 (96.941869)\n",
            "[7,   801]  loss 0.108678 (0.101530) train_acc 96.875000 (96.945225)\n",
            "[7,   901]  loss 0.054971 (0.100466) train_acc 98.437500 (96.973849)\n",
            "[8,     1]  loss 0.026489 (0.026489) train_acc 100.000000 (100.000000)\n",
            "[8,   101]  loss 0.089904 (0.100893) train_acc 98.437500 (97.029703)\n",
            "[8,   201]  loss 0.045742 (0.093519) train_acc 100.000000 (97.170398)\n",
            "[8,   301]  loss 0.034391 (0.093478) train_acc 98.437500 (97.144934)\n",
            "[8,   401]  loss 0.053418 (0.092319) train_acc 98.437500 (97.178928)\n",
            "[8,   501]  loss 0.022759 (0.091766) train_acc 100.000000 (97.227420)\n",
            "[8,   601]  loss 0.163144 (0.090504) train_acc 95.312500 (97.270175)\n",
            "[8,   701]  loss 0.032941 (0.091461) train_acc 100.000000 (97.211573)\n",
            "[8,   801]  loss 0.110442 (0.090902) train_acc 96.875000 (97.247581)\n",
            "[8,   901]  loss 0.079954 (0.090808) train_acc 98.437500 (97.247850)\n",
            "[9,     1]  loss 0.092796 (0.092796) train_acc 96.875000 (96.875000)\n",
            "[9,   101]  loss 0.108255 (0.087122) train_acc 98.437500 (97.370050)\n",
            "[9,   201]  loss 0.207156 (0.083768) train_acc 92.187500 (97.496891)\n",
            "[9,   301]  loss 0.194061 (0.084519) train_acc 95.312500 (97.492733)\n",
            "[9,   401]  loss 0.117191 (0.085505) train_acc 96.875000 (97.486752)\n",
            "[9,   501]  loss 0.109972 (0.084657) train_acc 96.875000 (97.523703)\n",
            "[9,   601]  loss 0.120699 (0.083162) train_acc 96.875000 (97.563956)\n",
            "[9,   701]  loss 0.145728 (0.082891) train_acc 95.312500 (97.559290)\n",
            "[9,   801]  loss 0.110853 (0.083190) train_acc 96.875000 (97.526529)\n",
            "[9,   901]  loss 0.089482 (0.083492) train_acc 95.312500 (97.514914)\n",
            "[10,     1]  loss 0.064985 (0.064985) train_acc 98.437500 (98.437500)\n",
            "[10,   101]  loss 0.086389 (0.080230) train_acc 98.437500 (97.493812)\n",
            "[10,   201]  loss 0.199889 (0.077490) train_acc 96.875000 (97.535759)\n",
            "[10,   301]  loss 0.049187 (0.078239) train_acc 98.437500 (97.580980)\n",
            "[10,   401]  loss 0.091664 (0.079570) train_acc 96.875000 (97.556889)\n",
            "[10,   501]  loss 0.114360 (0.079413) train_acc 95.312500 (97.595434)\n",
            "[10,   601]  loss 0.062544 (0.079353) train_acc 98.437500 (97.597754)\n",
            "[10,   701]  loss 0.048744 (0.078409) train_acc 98.437500 (97.628388)\n",
            "[10,   801]  loss 0.193179 (0.079054) train_acc 95.312500 (97.614310)\n",
            "[10,   901]  loss 0.080743 (0.078087) train_acc 98.437500 (97.638041)\n",
            "[11,     1]  loss 0.166541 (0.166541) train_acc 95.312500 (95.312500)\n",
            "[11,   101]  loss 0.024554 (0.080243) train_acc 98.437500 (97.586634)\n",
            "[11,   201]  loss 0.083221 (0.076594) train_acc 96.875000 (97.566853)\n",
            "[11,   301]  loss 0.040043 (0.075913) train_acc 100.000000 (97.664037)\n",
            "[11,   401]  loss 0.013843 (0.073714) train_acc 100.000000 (97.724439)\n",
            "[11,   501]  loss 0.074017 (0.073134) train_acc 96.875000 (97.760729)\n",
            "[11,   601]  loss 0.056648 (0.072510) train_acc 98.437500 (97.790141)\n",
            "[11,   701]  loss 0.066539 (0.072506) train_acc 96.875000 (97.804476)\n",
            "[11,   801]  loss 0.048723 (0.072620) train_acc 98.437500 (97.821083)\n",
            "[11,   901]  loss 0.083106 (0.072472) train_acc 96.875000 (97.813194)\n",
            "[12,     1]  loss 0.024558 (0.024558) train_acc 100.000000 (100.000000)\n",
            "[12,   101]  loss 0.030619 (0.066547) train_acc 100.000000 (98.019802)\n",
            "[12,   201]  loss 0.056613 (0.070702) train_acc 100.000000 (97.846704)\n",
            "[12,   301]  loss 0.088146 (0.070671) train_acc 96.875000 (97.819767)\n",
            "[12,   401]  loss 0.013585 (0.068576) train_acc 100.000000 (97.954333)\n",
            "[12,   501]  loss 0.017302 (0.067678) train_acc 100.000000 (97.966567)\n",
            "[12,   601]  loss 0.055307 (0.068016) train_acc 98.437500 (97.951331)\n",
            "[12,   701]  loss 0.124521 (0.069147) train_acc 95.312500 (97.907008)\n",
            "[12,   801]  loss 0.050573 (0.068998) train_acc 98.437500 (97.912765)\n",
            "[12,   901]  loss 0.071213 (0.068547) train_acc 98.437500 (97.951928)\n",
            "[13,     1]  loss 0.033516 (0.033516) train_acc 98.437500 (98.437500)\n",
            "[13,   101]  loss 0.026129 (0.059846) train_acc 100.000000 (98.081683)\n",
            "[13,   201]  loss 0.028550 (0.063143) train_acc 100.000000 (98.048818)\n",
            "[13,   301]  loss 0.073288 (0.063000) train_acc 98.437500 (98.001453)\n",
            "[13,   401]  loss 0.060537 (0.064590) train_acc 98.437500 (97.981608)\n",
            "[13,   501]  loss 0.021571 (0.063868) train_acc 100.000000 (97.991517)\n",
            "[13,   601]  loss 0.051797 (0.063840) train_acc 96.875000 (98.000728)\n",
            "[13,   701]  loss 0.013184 (0.063827) train_acc 100.000000 (98.002853)\n",
            "[13,   801]  loss 0.087866 (0.064420) train_acc 96.875000 (97.998596)\n",
            "[13,   901]  loss 0.091600 (0.064860) train_acc 96.875000 (98.005688)\n",
            "[14,     1]  loss 0.158242 (0.158242) train_acc 98.437500 (98.437500)\n",
            "[14,   101]  loss 0.047424 (0.060089) train_acc 98.437500 (98.128094)\n",
            "[14,   201]  loss 0.041813 (0.058044) train_acc 98.437500 (98.336443)\n",
            "[14,   301]  loss 0.079950 (0.059526) train_acc 96.875000 (98.261005)\n",
            "[14,   401]  loss 0.022712 (0.059295) train_acc 100.000000 (98.254364)\n",
            "[14,   501]  loss 0.055382 (0.062132) train_acc 98.437500 (98.134980)\n",
            "[14,   601]  loss 0.017451 (0.062751) train_acc 100.000000 (98.112521)\n",
            "[14,   701]  loss 0.040732 (0.062575) train_acc 98.437500 (98.107614)\n",
            "[14,   801]  loss 0.064761 (0.062605) train_acc 96.875000 (98.113686)\n",
            "[14,   901]  loss 0.071974 (0.062637) train_acc 96.875000 (98.116676)\n",
            "[15,     1]  loss 0.026733 (0.026733) train_acc 100.000000 (100.000000)\n",
            "[15,   101]  loss 0.018405 (0.061877) train_acc 100.000000 (98.282797)\n",
            "[15,   201]  loss 0.069144 (0.062218) train_acc 95.312500 (98.111007)\n",
            "[15,   301]  loss 0.014280 (0.063621) train_acc 100.000000 (98.058555)\n",
            "[15,   401]  loss 0.028365 (0.061383) train_acc 98.437500 (98.149158)\n",
            "[15,   501]  loss 0.041689 (0.060115) train_acc 100.000000 (98.175524)\n",
            "[15,   601]  loss 0.070189 (0.060619) train_acc 98.437500 (98.128120)\n",
            "[15,   701]  loss 0.275282 (0.060933) train_acc 93.750000 (98.123217)\n",
            "[15,   801]  loss 0.029783 (0.060075) train_acc 100.000000 (98.172207)\n",
            "[15,   901]  loss 0.028618 (0.059222) train_acc 98.437500 (98.186043)\n",
            "[16,     1]  loss 0.046160 (0.046160) train_acc 98.437500 (98.437500)\n",
            "[16,   101]  loss 0.110157 (0.069398) train_acc 96.875000 (98.004332)\n",
            "[16,   201]  loss 0.157249 (0.063327) train_acc 95.312500 (98.173197)\n",
            "[16,   301]  loss 0.013502 (0.059892) train_acc 100.000000 (98.183140)\n",
            "[16,   401]  loss 0.105046 (0.059540) train_acc 96.875000 (98.215399)\n",
            "[16,   501]  loss 0.015899 (0.057884) train_acc 100.000000 (98.228543)\n",
            "[16,   601]  loss 0.085243 (0.057173) train_acc 98.437500 (98.260711)\n",
            "[16,   701]  loss 0.015298 (0.057450) train_acc 100.000000 (98.268099)\n",
            "[16,   801]  loss 0.040172 (0.057052) train_acc 100.000000 (98.277544)\n",
            "[16,   901]  loss 0.020045 (0.056543) train_acc 100.000000 (98.283158)\n",
            "[17,     1]  loss 0.134798 (0.134798) train_acc 98.437500 (98.437500)\n",
            "[17,   101]  loss 0.057062 (0.053539) train_acc 98.437500 (98.452970)\n",
            "[17,   201]  loss 0.046546 (0.053981) train_acc 98.437500 (98.383085)\n",
            "[17,   301]  loss 0.069260 (0.055416) train_acc 96.875000 (98.354444)\n",
            "[17,   401]  loss 0.030129 (0.055108) train_acc 98.437500 (98.390742)\n",
            "[17,   501]  loss 0.014226 (0.055711) train_acc 100.000000 (98.365768)\n",
            "[17,   601]  loss 0.010437 (0.056151) train_acc 100.000000 (98.354305)\n",
            "[17,   701]  loss 0.041374 (0.055515) train_acc 98.437500 (98.355029)\n",
            "[17,   801]  loss 0.023789 (0.054849) train_acc 100.000000 (98.375078)\n",
            "[17,   901]  loss 0.042195 (0.054904) train_acc 98.437500 (98.376804)\n",
            "[18,     1]  loss 0.050334 (0.050334) train_acc 98.437500 (98.437500)\n",
            "[18,   101]  loss 0.035745 (0.046604) train_acc 98.437500 (98.654084)\n",
            "[18,   201]  loss 0.146715 (0.047022) train_acc 98.437500 (98.616294)\n",
            "[18,   301]  loss 0.162472 (0.051811) train_acc 96.875000 (98.468646)\n",
            "[18,   401]  loss 0.083807 (0.052808) train_acc 98.437500 (98.425810)\n",
            "[18,   501]  loss 0.053617 (0.052496) train_acc 98.437500 (98.393837)\n",
            "[18,   601]  loss 0.032901 (0.053241) train_acc 98.437500 (98.364705)\n",
            "[18,   701]  loss 0.023949 (0.053093) train_acc 100.000000 (98.357257)\n",
            "[18,   801]  loss 0.063497 (0.053003) train_acc 98.437500 (98.371177)\n",
            "[18,   901]  loss 0.061869 (0.052920) train_acc 98.437500 (98.369867)\n",
            "[19,     1]  loss 0.108780 (0.108780) train_acc 93.750000 (93.750000)\n",
            "[19,   101]  loss 0.006421 (0.051351) train_acc 100.000000 (98.313738)\n",
            "[19,   201]  loss 0.068611 (0.051836) train_acc 98.437500 (98.336443)\n",
            "[19,   301]  loss 0.039518 (0.051291) train_acc 98.437500 (98.395972)\n",
            "[19,   401]  loss 0.073621 (0.051959) train_acc 96.875000 (98.421914)\n",
            "[19,   501]  loss 0.074666 (0.051803) train_acc 96.875000 (98.437500)\n",
            "[19,   601]  loss 0.076713 (0.051697) train_acc 96.875000 (98.442700)\n",
            "[19,   701]  loss 0.010560 (0.052126) train_acc 100.000000 (98.408524)\n",
            "[19,   801]  loss 0.038075 (0.051025) train_acc 98.437500 (98.429697)\n",
            "[19,   901]  loss 0.036474 (0.050282) train_acc 98.437500 (98.446171)\n",
            "[20,     1]  loss 0.078122 (0.078122) train_acc 96.875000 (96.875000)\n",
            "[20,   101]  loss 0.148045 (0.056610) train_acc 96.875000 (98.220916)\n",
            "[20,   201]  loss 0.069455 (0.050649) train_acc 95.312500 (98.421953)\n",
            "[20,   301]  loss 0.009029 (0.050585) train_acc 100.000000 (98.489410)\n",
            "[20,   401]  loss 0.041003 (0.051062) train_acc 100.000000 (98.441397)\n",
            "[20,   501]  loss 0.071641 (0.050813) train_acc 96.875000 (98.459331)\n",
            "[20,   601]  loss 0.028565 (0.049796) train_acc 98.437500 (98.466098)\n",
            "[20,   701]  loss 0.034692 (0.050012) train_acc 98.437500 (98.468705)\n",
            "[20,   801]  loss 0.150377 (0.049457) train_acc 98.437500 (98.490169)\n",
            "[20,   901]  loss 0.042397 (0.049354) train_acc 98.437500 (98.510336)\n",
            "Finished Training\n",
            "Size of model after quantization\n",
            "Size (MB): 0.055572\n",
            "Accuracy of the fused and quantized network (trained quantized) on the test images: 98.53% - INT8\n"
          ]
        }
      ]
    }
  ]
}