{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torchvision import datasets, transforms\n",
    "from torch.autograd import Variable\n",
    "from collections import namedtuple\n",
    "\n",
    "from PIL import Image\n",
    "import os\n",
    "import os.path\n",
    "import errno\n",
    "import codecs\n",
    "import copy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.cuda.device_count() 4\n",
      "torch.cuda.current_device() 0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Seed the torch and numpy RNGs for reproducibility (re-seeded later from args).\n",
    "torch.manual_seed(0)\n",
    "np.random.seed(0)\n",
    "print(\"torch.cuda.device_count()\", torch.cuda.device_count())\n",
    "print(\"torch.cuda.current_device()\", torch.cuda.current_device())\n",
    "# Select GPU 1 for all subsequent CUDA allocations on this kernel.\n",
    "torch.cuda.set_device(1)\n",
    "print(\"torch.cuda.current_device()\", torch.cuda.current_device())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class MNIST(torch.utils.data.Dataset):\n",
    "  \"\"\"`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.\n",
    "  Args:\n",
    "    root (string): Root directory of dataset where ``processed/training.pt``\n",
    "      and  ``processed/test.pt`` exist.\n",
    "    dataset (string): If `train` or `valid`, creates dataset from ``training.pt``,\n",
    "      otherwise from ``test.pt``.\n",
    "    download (bool, optional): If true, downloads the dataset from the internet and\n",
    "      puts it in root directory. If dataset is already downloaded, it is not\n",
    "      downloaded again.\n",
    "    transform (callable, optional): A function/transform that  takes in an PIL image\n",
    "      and returns a transformed version. E.g, ``transforms.RandomCrop``\n",
    "    target_transform (callable, optional): A function/transform that takes in the\n",
    "      target and transforms it.\n",
    "  \"\"\"\n",
    "  # NOTE(review): the yann.lecun.com mirrors are frequently unavailable these\n",
    "  # days — confirm these URLs still resolve before relying on download=True.\n",
    "  urls = [\n",
    "    'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',\n",
    "    'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',\n",
    "    'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',\n",
    "    'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',\n",
    "  ]\n",
    "  # Directory / file layout under `root`.\n",
    "  raw_folder = 'raw'\n",
    "  processed_folder = 'processed'\n",
    "  training_file = 'training.pt'\n",
    "  test_file = 'test.pt'\n",
    "\n",
    "  def __init__(self, root, dataset='train', transform=None, target_transform=None, download=False):\n",
    "    self.root = os.path.expanduser(root)\n",
    "    self.transform = transform\n",
    "    self.target_transform = target_transform\n",
    "    self.dataset = dataset  # 'train', 'valid', or 'test'\n",
    "    self.cutoff = 50000 # split between train vs validation\n",
    "\n",
    "    if download:\n",
    "      self.download()\n",
    "\n",
    "    if not self._check_exists():\n",
    "      raise RuntimeError('Dataset not found.' +\n",
    "                        ' You can use download=True to download it')\n",
    "\n",
    "    # The official 60k training set is split in two: the first `cutoff`\n",
    "    # examples become 'train', the remainder 'valid'. 'test' loads the\n",
    "    # separate 10k test set.\n",
    "    # NOTE(review): torch.load below uses `root`, not `self.root`, so a '~'\n",
    "    # in the path would not be expanded here — confirm intended.\n",
    "    if self.dataset == 'train' or self.dataset == 'valid':\n",
    "      full_train_data, full_train_labels = torch.load(os.path.join(root, self.processed_folder, self.training_file))\n",
    "      if self.dataset == 'train':\n",
    "        self.data = full_train_data[:self.cutoff]\n",
    "        self.labels = full_train_labels[:self.cutoff]\n",
    "      else:\n",
    "        self.data = full_train_data[self.cutoff:]\n",
    "        self.labels = full_train_labels[self.cutoff:]\n",
    "    else:\n",
    "      self.data, self.labels = torch.load(os.path.join(root, self.processed_folder, self.test_file))\n",
    "\n",
    "  def __getitem__(self, index):\n",
    "      \"\"\"\n",
    "      Args:\n",
    "        index (int): Index\n",
    "      Returns:\n",
    "        tuple: (image, target) where target is index of the target class.\n",
    "      \"\"\"\n",
    "      img, target = self.data[index], self.labels[index]\n",
    "\n",
    "      # doing this so that it is consistent with all other datasets\n",
    "      # to return a PIL Image\n",
    "      img = Image.fromarray(img.numpy(), mode='L')\n",
    "\n",
    "      if self.transform is not None:\n",
    "        img = self.transform(img)\n",
    "\n",
    "      if self.target_transform is not None:\n",
    "        target = self.target_transform(target)\n",
    "\n",
    "      return img, target\n",
    "\n",
    "  def __len__(self):\n",
    "    # Number of examples in the selected split.\n",
    "    return len(self.data)\n",
    "\n",
    "  def _check_exists(self):\n",
    "    # Both processed tensors must exist for the dataset to be usable.\n",
    "    return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \\\n",
    "      os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))\n",
    "\n",
    "  def download(self):\n",
    "    \"\"\"Download the MNIST data if it doesn't exist in processed_folder already.\"\"\"\n",
    "    from six.moves import urllib\n",
    "    import gzip\n",
    "\n",
    "    if self._check_exists():\n",
    "      return\n",
    "\n",
    "    # download files\n",
    "    try:\n",
    "      os.makedirs(os.path.join(self.root, self.raw_folder))\n",
    "      os.makedirs(os.path.join(self.root, self.processed_folder))\n",
    "    except OSError as e:\n",
    "      if e.errno == errno.EEXIST:\n",
    "        pass\n",
    "      else:\n",
    "        raise\n",
    "\n",
    "    # Fetch each archive, decompress it next to the .gz, then delete the .gz.\n",
    "    for url in self.urls:\n",
    "      print('Downloading ' + url)\n",
    "      data = urllib.request.urlopen(url)\n",
    "      filename = url.rpartition('/')[2]\n",
    "      file_path = os.path.join(self.root, self.raw_folder, filename)\n",
    "      with open(file_path, 'wb') as f:\n",
    "          f.write(data.read())\n",
    "      with open(file_path.replace('.gz', ''), 'wb') as out_f, \\\n",
    "              gzip.GzipFile(file_path) as zip_f:\n",
    "          out_f.write(zip_f.read())\n",
    "      os.unlink(file_path)\n",
    "\n",
    "    # process and save as torch files\n",
    "    print('Processing...')\n",
    "\n",
    "    training_set = (\n",
    "      read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),\n",
    "      read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))\n",
    "    )\n",
    "    test_set = (\n",
    "      read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),\n",
    "      read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))\n",
    "    )\n",
    "    with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:\n",
    "      torch.save(training_set, f)\n",
    "    with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:\n",
    "      torch.save(test_set, f)\n",
    "\n",
    "    print('Done!')\n",
    "\n",
    "\n",
    "def get_int(b):\n",
    "  # Big-endian bytes -> int; the hex round-trip works for both py2 str and py3 bytes.\n",
    "  return int(codecs.encode(b, 'hex'), 16)\n",
    "\n",
    "\n",
    "def parse_byte(b):\n",
    "  # Normalize a single byte: indexing a py2 str yields a 1-char str (needs\n",
    "  # ord()); indexing py3 bytes already yields an int.\n",
    "  if isinstance(b, str):\n",
    "    return ord(b)\n",
    "  return b\n",
    "\n",
    "\n",
    "def read_label_file(path):\n",
    "  # Parse an idx1-ubyte label file into a 1-D LongTensor.\n",
    "  # Magic number 2049 identifies the MNIST label-file format.\n",
    "  with open(path, 'rb') as f:\n",
    "    data = f.read()\n",
    "    assert get_int(data[:4]) == 2049\n",
    "    length = get_int(data[4:8])\n",
    "    labels = [parse_byte(b) for b in data[8:]]\n",
    "    assert len(labels) == length\n",
    "    return torch.LongTensor(labels)\n",
    "\n",
    "\n",
    "def read_image_file(path):\n",
    "  # Parse an idx3-ubyte image file into a (N, rows, cols) ByteTensor.\n",
    "  # Magic number 2051 identifies the MNIST image-file format; the header\n",
    "  # also carries the image count and dimensions.\n",
    "  with open(path, 'rb') as f:\n",
    "    data = f.read()\n",
    "    assert get_int(data[:4]) == 2051\n",
    "    length = get_int(data[4:8])\n",
    "    num_rows = get_int(data[8:12])\n",
    "    num_cols = get_int(data[12:16])\n",
    "    images = []\n",
    "    idx = 16  # pixel payload starts right after the 16-byte header\n",
    "    for l in range(length):\n",
    "      img = []\n",
    "      images.append(img)\n",
    "      for r in range(num_rows):\n",
    "        row = []\n",
    "        img.append(row)\n",
    "        for c in range(num_cols):\n",
    "          row.append(parse_byte(data[idx]))\n",
    "          idx += 1\n",
    "    assert len(images) == length\n",
    "    # Use the dimensions read from the header instead of hard-coding 28x28,\n",
    "    # so the parser works for any idx3 image size (identical for MNIST).\n",
    "    return torch.ByteTensor(images).view(length, num_rows, num_cols)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Immutable hyperparameter container mirroring argparse-style args.\n",
    "Args = namedtuple('Args', ['batch_size', 'test_batch_size', 'epochs', 'lr', 'cuda', 'seed', 'log_interval'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Concrete run configuration; cuda=True requires a CUDA-capable GPU.\n",
    "args = Args(batch_size=1000, test_batch_size=1000, epochs=30, lr=0.001, cuda=True, seed=0, log_interval=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Re-seed with the configured seed (CPU RNG and, when enabled, the current GPU).\n",
    "torch.manual_seed(args.seed)\n",
    "if args.cuda:\n",
    "  torch.cuda.manual_seed(args.seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# pin_memory speeds up host-to-GPU copies; only relevant when using CUDA.\n",
    "kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n",
    "\n",
    "# (0.1307,), (0.3081,) are the conventional MNIST global mean/std for normalization.\n",
    "train_loader = torch.utils.data.DataLoader(\n",
    "  MNIST('MNIST_data', dataset='train', download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n",
    "  batch_size=args.batch_size, shuffle=True, **kwargs)\n",
    "\n",
    "valid_loader = torch.utils.data.DataLoader(\n",
    "  MNIST('MNIST_data', dataset='valid', download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n",
    "  batch_size=args.batch_size, shuffle=True, **kwargs)\n",
    "\n",
    "test_loader = torch.utils.data.DataLoader(\n",
    "  MNIST('MNIST_data', dataset='test', transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])),\n",
    "  batch_size=args.batch_size, shuffle=True, **kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class Net(nn.Module):\n",
    "  \"\"\"Small LeNet-style CNN for 28x28 MNIST: two conv+pool stages plus one\n",
    "  linear classifier. forward() returns log-probabilities over 10 classes.\n",
    "  \"\"\"\n",
    "  def __init__(self):\n",
    "    super(Net, self).__init__()\n",
    "    self.num_filter1 = 8\n",
    "    self.num_filter2 = 16\n",
    "    self.num_padding = 2\n",
    "    # input is 28x28\n",
    "    # padding=2 for same padding\n",
    "    self.conv1 = nn.Conv2d(1, self.num_filter1, 5, padding=self.num_padding)\n",
    "    # feature map size is 14*14 by pooling\n",
    "    # padding=2 for same padding\n",
    "    self.conv2 = nn.Conv2d(self.num_filter1, self.num_filter2, 5, padding=self.num_padding)\n",
    "    # feature map size is 7*7 by pooling\n",
    "    self.fc = nn.Linear(self.num_filter2*7*7, 10)\n",
    "\n",
    "  def forward(self, x):\n",
    "    x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n",
    "    x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n",
    "    x = x.view(-1, self.num_filter2*7*7)   # reshape Variable\n",
    "    x = self.fc(x)\n",
    "    # Pass dim=1 explicitly (softmax over the class dimension): the implicit\n",
    "    # default is deprecated and identical to dim=1 for this 2-D input.\n",
    "    return F.log_softmax(x, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "model = Net()\n",
    "if args.cuda:\n",
    "  model.cuda()\n",
    "\n",
    "# Deep-copy the untrained network so its initial weights can be inspected later.\n",
    "orig_model = copy.deepcopy(model)\n",
    "\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def train(epoch, model, train_loader):\n",
    "  \"\"\"Run one training epoch over `train_loader`, logging every\n",
    "  `args.log_interval` batches. Relies on the notebook-level `args` and\n",
    "  `optimizer` globals.\n",
    "  \"\"\"\n",
    "  model.train()\n",
    "  for batch_idx, (data, target) in enumerate(train_loader):\n",
    "    if args.cuda:\n",
    "      data, target = data.cuda(), target.cuda()\n",
    "    # Variable() is the pre-0.4 PyTorch autograd wrapper.\n",
    "    data, target = Variable(data), Variable(target)\n",
    "    optimizer.zero_grad()\n",
    "    output = model(data)\n",
    "    # Model emits log-probabilities, so NLL loss == cross-entropy.\n",
    "    loss = F.nll_loss(output, target)\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    if batch_idx % args.log_interval == 0:\n",
    "      # loss.data[0] is the pre-0.4 scalar accessor (loss.item() in modern torch).\n",
    "      print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
    "            epoch, batch_idx * len(data), len(train_loader.dataset),\n",
    "            100. * batch_idx / len(train_loader), loss.data[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def evaluate(model, test_loader):\n",
    "  \"\"\"Compute average NLL loss and accuracy over a loader.\n",
    "\n",
    "  Returns the accuracy as a fraction in [0, 1]. Relies on the notebook-level\n",
    "  `args` global.\n",
    "  \"\"\"\n",
    "  model.eval()\n",
    "  test_loss = 0\n",
    "  correct = 0\n",
    "  for data, target in test_loader:\n",
    "    if args.cuda:\n",
    "      data, target = data.cuda(), target.cuda()\n",
    "    # volatile=True is the pre-0.4 equivalent of torch.no_grad().\n",
    "    data, target = Variable(data, volatile=True), Variable(target)\n",
    "    output = model(data)\n",
    "    test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n",
    "    pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n",
    "    correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n",
    "\n",
    "  test_loss /= len(test_loader.dataset)\n",
    "  acc = correct / len(test_loader.dataset)\n",
    "  print('\\nAverage loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n",
    "    test_loss, correct, len(test_loader.dataset),\n",
    "    100. * acc))\n",
    "  return acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train Epoch: 1 [0/50000 (0%)]\tLoss: 2.303826\n",
      "Train Epoch: 1 [10000/50000 (20%)]\tLoss: 1.677860\n",
      "Train Epoch: 1 [20000/50000 (40%)]\tLoss: 0.841788\n",
      "Train Epoch: 1 [30000/50000 (60%)]\tLoss: 0.508113\n",
      "Train Epoch: 1 [40000/50000 (80%)]\tLoss: 0.429086\n",
      "\n",
      "Average loss: 0.3061, Accuracy: 9138/10000 (91.3800%)\n",
      "\n",
      "best valid_acc 91.38\n",
      "Train Epoch: 2 [0/50000 (0%)]\tLoss: 0.322196\n",
      "Train Epoch: 2 [10000/50000 (20%)]\tLoss: 0.280522\n",
      "Train Epoch: 2 [20000/50000 (40%)]\tLoss: 0.266068\n",
      "Train Epoch: 2 [30000/50000 (60%)]\tLoss: 0.249113\n",
      "Train Epoch: 2 [40000/50000 (80%)]\tLoss: 0.212267\n",
      "\n",
      "Average loss: 0.1894, Accuracy: 9464/10000 (94.6400%)\n",
      "\n",
      "best valid_acc 94.64\n",
      "Train Epoch: 3 [0/50000 (0%)]\tLoss: 0.169671\n",
      "Train Epoch: 3 [10000/50000 (20%)]\tLoss: 0.187899\n",
      "Train Epoch: 3 [20000/50000 (40%)]\tLoss: 0.179214\n",
      "Train Epoch: 3 [30000/50000 (60%)]\tLoss: 0.184280\n",
      "Train Epoch: 3 [40000/50000 (80%)]\tLoss: 0.199595\n",
      "\n",
      "Average loss: 0.1434, Accuracy: 9612/10000 (96.1200%)\n",
      "\n",
      "best valid_acc 96.12\n",
      "Train Epoch: 4 [0/50000 (0%)]\tLoss: 0.142606\n",
      "Train Epoch: 4 [10000/50000 (20%)]\tLoss: 0.128802\n",
      "Train Epoch: 4 [20000/50000 (40%)]\tLoss: 0.115742\n",
      "Train Epoch: 4 [30000/50000 (60%)]\tLoss: 0.123769\n",
      "Train Epoch: 4 [40000/50000 (80%)]\tLoss: 0.124975\n",
      "\n",
      "Average loss: 0.1124, Accuracy: 9696/10000 (96.9600%)\n",
      "\n",
      "best valid_acc 96.96000000000001\n",
      "Train Epoch: 5 [0/50000 (0%)]\tLoss: 0.103727\n",
      "Train Epoch: 5 [10000/50000 (20%)]\tLoss: 0.092302\n",
      "Train Epoch: 5 [20000/50000 (40%)]\tLoss: 0.103799\n",
      "Train Epoch: 5 [30000/50000 (60%)]\tLoss: 0.079889\n",
      "Train Epoch: 5 [40000/50000 (80%)]\tLoss: 0.092919\n",
      "\n",
      "Average loss: 0.0918, Accuracy: 9739/10000 (97.3900%)\n",
      "\n",
      "best valid_acc 97.39\n",
      "Train Epoch: 6 [0/50000 (0%)]\tLoss: 0.083168\n",
      "Train Epoch: 6 [10000/50000 (20%)]\tLoss: 0.110532\n",
      "Train Epoch: 6 [20000/50000 (40%)]\tLoss: 0.093374\n",
      "Train Epoch: 6 [30000/50000 (60%)]\tLoss: 0.075025\n",
      "Train Epoch: 6 [40000/50000 (80%)]\tLoss: 0.095143\n",
      "\n",
      "Average loss: 0.0791, Accuracy: 9771/10000 (97.7100%)\n",
      "\n",
      "best valid_acc 97.71\n",
      "Train Epoch: 7 [0/50000 (0%)]\tLoss: 0.069611\n",
      "Train Epoch: 7 [10000/50000 (20%)]\tLoss: 0.061767\n",
      "Train Epoch: 7 [20000/50000 (40%)]\tLoss: 0.079088\n",
      "Train Epoch: 7 [30000/50000 (60%)]\tLoss: 0.059623\n",
      "Train Epoch: 7 [40000/50000 (80%)]\tLoss: 0.084319\n",
      "\n",
      "Average loss: 0.0741, Accuracy: 9786/10000 (97.8600%)\n",
      "\n",
      "best valid_acc 97.86\n",
      "Train Epoch: 8 [0/50000 (0%)]\tLoss: 0.073275\n",
      "Train Epoch: 8 [10000/50000 (20%)]\tLoss: 0.081987\n",
      "Train Epoch: 8 [20000/50000 (40%)]\tLoss: 0.082448\n",
      "Train Epoch: 8 [30000/50000 (60%)]\tLoss: 0.068978\n",
      "Train Epoch: 8 [40000/50000 (80%)]\tLoss: 0.063742\n",
      "\n",
      "Average loss: 0.0691, Accuracy: 9796/10000 (97.9600%)\n",
      "\n",
      "best valid_acc 97.96000000000001\n",
      "Train Epoch: 9 [0/50000 (0%)]\tLoss: 0.064298\n",
      "Train Epoch: 9 [10000/50000 (20%)]\tLoss: 0.065013\n",
      "Train Epoch: 9 [20000/50000 (40%)]\tLoss: 0.069385\n",
      "Train Epoch: 9 [30000/50000 (60%)]\tLoss: 0.041582\n",
      "Train Epoch: 9 [40000/50000 (80%)]\tLoss: 0.066154\n",
      "\n",
      "Average loss: 0.0631, Accuracy: 9815/10000 (98.1500%)\n",
      "\n",
      "best valid_acc 98.15\n",
      "Train Epoch: 10 [0/50000 (0%)]\tLoss: 0.058424\n",
      "Train Epoch: 10 [10000/50000 (20%)]\tLoss: 0.061783\n",
      "Train Epoch: 10 [20000/50000 (40%)]\tLoss: 0.037734\n",
      "Train Epoch: 10 [30000/50000 (60%)]\tLoss: 0.068406\n",
      "Train Epoch: 10 [40000/50000 (80%)]\tLoss: 0.063054\n",
      "\n",
      "Average loss: 0.0604, Accuracy: 9816/10000 (98.1600%)\n",
      "\n",
      "best valid_acc 98.16\n",
      "Train Epoch: 11 [0/50000 (0%)]\tLoss: 0.038760\n",
      "Train Epoch: 11 [10000/50000 (20%)]\tLoss: 0.064318\n",
      "Train Epoch: 11 [20000/50000 (40%)]\tLoss: 0.044094\n",
      "Train Epoch: 11 [30000/50000 (60%)]\tLoss: 0.053251\n",
      "Train Epoch: 11 [40000/50000 (80%)]\tLoss: 0.041955\n",
      "\n",
      "Average loss: 0.0585, Accuracy: 9827/10000 (98.2700%)\n",
      "\n",
      "best valid_acc 98.27\n",
      "Train Epoch: 12 [0/50000 (0%)]\tLoss: 0.029978\n",
      "Train Epoch: 12 [10000/50000 (20%)]\tLoss: 0.032916\n",
      "Train Epoch: 12 [20000/50000 (40%)]\tLoss: 0.041434\n",
      "Train Epoch: 12 [30000/50000 (60%)]\tLoss: 0.067432\n",
      "Train Epoch: 12 [40000/50000 (80%)]\tLoss: 0.058171\n",
      "\n",
      "Average loss: 0.0535, Accuracy: 9839/10000 (98.3900%)\n",
      "\n",
      "best valid_acc 98.39\n",
      "Train Epoch: 13 [0/50000 (0%)]\tLoss: 0.036091\n",
      "Train Epoch: 13 [10000/50000 (20%)]\tLoss: 0.057308\n",
      "Train Epoch: 13 [20000/50000 (40%)]\tLoss: 0.058362\n",
      "Train Epoch: 13 [30000/50000 (60%)]\tLoss: 0.077994\n",
      "Train Epoch: 13 [40000/50000 (80%)]\tLoss: 0.033563\n",
      "\n",
      "Average loss: 0.0561, Accuracy: 9831/10000 (98.3100%)\n",
      "\n",
      "Train Epoch: 14 [0/50000 (0%)]\tLoss: 0.044331\n",
      "Train Epoch: 14 [10000/50000 (20%)]\tLoss: 0.049645\n",
      "Train Epoch: 14 [20000/50000 (40%)]\tLoss: 0.032262\n",
      "Train Epoch: 14 [30000/50000 (60%)]\tLoss: 0.036172\n",
      "Train Epoch: 14 [40000/50000 (80%)]\tLoss: 0.031683\n",
      "\n",
      "Average loss: 0.0522, Accuracy: 9849/10000 (98.4900%)\n",
      "\n",
      "best valid_acc 98.49\n",
      "Train Epoch: 15 [0/50000 (0%)]\tLoss: 0.034813\n",
      "Train Epoch: 15 [10000/50000 (20%)]\tLoss: 0.046153\n",
      "Train Epoch: 15 [20000/50000 (40%)]\tLoss: 0.041864\n",
      "Train Epoch: 15 [30000/50000 (60%)]\tLoss: 0.027925\n",
      "Train Epoch: 15 [40000/50000 (80%)]\tLoss: 0.033366\n",
      "\n",
      "Average loss: 0.0552, Accuracy: 9841/10000 (98.4100%)\n",
      "\n",
      "Train Epoch: 16 [0/50000 (0%)]\tLoss: 0.031628\n",
      "Train Epoch: 16 [10000/50000 (20%)]\tLoss: 0.035893\n",
      "Train Epoch: 16 [20000/50000 (40%)]\tLoss: 0.057490\n",
      "Train Epoch: 16 [30000/50000 (60%)]\tLoss: 0.041925\n",
      "Train Epoch: 16 [40000/50000 (80%)]\tLoss: 0.034331\n",
      "\n",
      "Average loss: 0.0552, Accuracy: 9839/10000 (98.3900%)\n",
      "\n",
      "Train Epoch: 17 [0/50000 (0%)]\tLoss: 0.028255\n",
      "Train Epoch: 17 [10000/50000 (20%)]\tLoss: 0.034863\n",
      "Train Epoch: 17 [20000/50000 (40%)]\tLoss: 0.038391\n",
      "Train Epoch: 17 [30000/50000 (60%)]\tLoss: 0.037168\n",
      "Train Epoch: 17 [40000/50000 (80%)]\tLoss: 0.034141\n",
      "\n",
      "Average loss: 0.0493, Accuracy: 9858/10000 (98.5800%)\n",
      "\n",
      "best valid_acc 98.58\n",
      "Train Epoch: 18 [0/50000 (0%)]\tLoss: 0.029900\n",
      "Train Epoch: 18 [10000/50000 (20%)]\tLoss: 0.046907\n",
      "Train Epoch: 18 [20000/50000 (40%)]\tLoss: 0.028100\n",
      "Train Epoch: 18 [30000/50000 (60%)]\tLoss: 0.029808\n",
      "Train Epoch: 18 [40000/50000 (80%)]\tLoss: 0.029611\n",
      "\n",
      "Average loss: 0.0473, Accuracy: 9867/10000 (98.6700%)\n",
      "\n",
      "best valid_acc 98.67\n",
      "Train Epoch: 19 [0/50000 (0%)]\tLoss: 0.037320\n",
      "Train Epoch: 19 [10000/50000 (20%)]\tLoss: 0.044111\n",
      "Train Epoch: 19 [20000/50000 (40%)]\tLoss: 0.041239\n",
      "Train Epoch: 19 [30000/50000 (60%)]\tLoss: 0.032999\n",
      "Train Epoch: 19 [40000/50000 (80%)]\tLoss: 0.036658\n",
      "\n",
      "Average loss: 0.0474, Accuracy: 9863/10000 (98.6300%)\n",
      "\n",
      "Train Epoch: 20 [0/50000 (0%)]\tLoss: 0.026344\n",
      "Train Epoch: 20 [10000/50000 (20%)]\tLoss: 0.027241\n",
      "Train Epoch: 20 [20000/50000 (40%)]\tLoss: 0.023188\n",
      "Train Epoch: 20 [30000/50000 (60%)]\tLoss: 0.018313\n",
      "Train Epoch: 20 [40000/50000 (80%)]\tLoss: 0.044020\n",
      "\n",
      "Average loss: 0.0487, Accuracy: 9857/10000 (98.5700%)\n",
      "\n",
      "Train Epoch: 21 [0/50000 (0%)]\tLoss: 0.042182\n",
      "Train Epoch: 21 [10000/50000 (20%)]\tLoss: 0.034582\n",
      "Train Epoch: 21 [20000/50000 (40%)]\tLoss: 0.037214\n",
      "Train Epoch: 21 [30000/50000 (60%)]\tLoss: 0.027711\n",
      "Train Epoch: 21 [40000/50000 (80%)]\tLoss: 0.033280\n",
      "\n",
      "Average loss: 0.0468, Accuracy: 9870/10000 (98.7000%)\n",
      "\n",
      "best valid_acc 98.7\n",
      "Train Epoch: 22 [0/50000 (0%)]\tLoss: 0.023809\n",
      "Train Epoch: 22 [10000/50000 (20%)]\tLoss: 0.030482\n",
      "Train Epoch: 22 [20000/50000 (40%)]\tLoss: 0.026654\n",
      "Train Epoch: 22 [30000/50000 (60%)]\tLoss: 0.021818\n",
      "Train Epoch: 22 [40000/50000 (80%)]\tLoss: 0.022738\n",
      "\n",
      "Average loss: 0.0490, Accuracy: 9862/10000 (98.6200%)\n",
      "\n",
      "Train Epoch: 23 [0/50000 (0%)]\tLoss: 0.024872\n",
      "Train Epoch: 23 [10000/50000 (20%)]\tLoss: 0.042878\n",
      "Train Epoch: 23 [20000/50000 (40%)]\tLoss: 0.031781\n",
      "Train Epoch: 23 [30000/50000 (60%)]\tLoss: 0.025498\n",
      "Train Epoch: 23 [40000/50000 (80%)]\tLoss: 0.030885\n",
      "\n",
      "Average loss: 0.0451, Accuracy: 9873/10000 (98.7300%)\n",
      "\n",
      "best valid_acc 98.72999999999999\n",
      "Train Epoch: 24 [0/50000 (0%)]\tLoss: 0.030728\n",
      "Train Epoch: 24 [10000/50000 (20%)]\tLoss: 0.025797\n",
      "Train Epoch: 24 [20000/50000 (40%)]\tLoss: 0.021019\n",
      "Train Epoch: 24 [30000/50000 (60%)]\tLoss: 0.023815\n",
      "Train Epoch: 24 [40000/50000 (80%)]\tLoss: 0.033169\n",
      "\n",
      "Average loss: 0.0454, Accuracy: 9873/10000 (98.7300%)\n",
      "\n",
      "best valid_acc 98.72999999999999\n",
      "Train Epoch: 25 [0/50000 (0%)]\tLoss: 0.025571\n",
      "Train Epoch: 25 [10000/50000 (20%)]\tLoss: 0.023449\n",
      "Train Epoch: 25 [20000/50000 (40%)]\tLoss: 0.025415\n",
      "Train Epoch: 25 [30000/50000 (60%)]\tLoss: 0.018114\n",
      "Train Epoch: 25 [40000/50000 (80%)]\tLoss: 0.033626\n",
      "\n",
      "Average loss: 0.0482, Accuracy: 9869/10000 (98.6900%)\n",
      "\n",
      "Train Epoch: 26 [0/50000 (0%)]\tLoss: 0.018225\n",
      "Train Epoch: 26 [10000/50000 (20%)]\tLoss: 0.021161\n",
      "Train Epoch: 26 [20000/50000 (40%)]\tLoss: 0.022208\n",
      "Train Epoch: 26 [30000/50000 (60%)]\tLoss: 0.036804\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train Epoch: 26 [40000/50000 (80%)]\tLoss: 0.035899\n",
      "\n",
      "Average loss: 0.0441, Accuracy: 9884/10000 (98.8400%)\n",
      "\n",
      "best valid_acc 98.83999999999999\n",
      "Train Epoch: 27 [0/50000 (0%)]\tLoss: 0.013573\n",
      "Train Epoch: 27 [10000/50000 (20%)]\tLoss: 0.016220\n",
      "Train Epoch: 27 [20000/50000 (40%)]\tLoss: 0.021805\n",
      "Train Epoch: 27 [30000/50000 (60%)]\tLoss: 0.030295\n",
      "Train Epoch: 27 [40000/50000 (80%)]\tLoss: 0.027188\n",
      "\n",
      "Average loss: 0.0431, Accuracy: 9881/10000 (98.8100%)\n",
      "\n",
      "Train Epoch: 28 [0/50000 (0%)]\tLoss: 0.021916\n",
      "Train Epoch: 28 [10000/50000 (20%)]\tLoss: 0.019936\n",
      "Train Epoch: 28 [20000/50000 (40%)]\tLoss: 0.021940\n",
      "Train Epoch: 28 [30000/50000 (60%)]\tLoss: 0.010939\n",
      "Train Epoch: 28 [40000/50000 (80%)]\tLoss: 0.031478\n",
      "\n",
      "Average loss: 0.0441, Accuracy: 9881/10000 (98.8100%)\n",
      "\n",
      "Train Epoch: 29 [0/50000 (0%)]\tLoss: 0.015474\n",
      "Train Epoch: 29 [10000/50000 (20%)]\tLoss: 0.014100\n",
      "Train Epoch: 29 [20000/50000 (40%)]\tLoss: 0.018405\n",
      "Train Epoch: 29 [30000/50000 (60%)]\tLoss: 0.016726\n",
      "Train Epoch: 29 [40000/50000 (80%)]\tLoss: 0.017310\n",
      "\n",
      "Average loss: 0.0453, Accuracy: 9877/10000 (98.7700%)\n",
      "\n",
      "Train Epoch: 30 [0/50000 (0%)]\tLoss: 0.010478\n",
      "Train Epoch: 30 [10000/50000 (20%)]\tLoss: 0.024923\n",
      "Train Epoch: 30 [20000/50000 (40%)]\tLoss: 0.011567\n",
      "Train Epoch: 30 [30000/50000 (60%)]\tLoss: 0.021501\n",
      "Train Epoch: 30 [40000/50000 (80%)]\tLoss: 0.026448\n",
      "\n",
      "Average loss: 0.0443, Accuracy: 9879/10000 (98.7900%)\n",
      "\n",
      "\n",
      "Average loss: 0.0392, Accuracy: 9879/10000 (98.7900%)\n",
      "\n",
      "final test acc 98.79\n"
     ]
    }
   ],
   "source": [
    "# Model selection: keep a deep copy of the model at its best validation\n",
    "# accuracy, then report that snapshot's accuracy on the held-out test set.\n",
    "best_valid_acc = 0\n",
    "for epoch in range(1, args.epochs + 1):\n",
    "  train(epoch, model, train_loader)\n",
    "  valid_acc = evaluate(model, valid_loader)\n",
    "  # >= ensures best_model is always set on the first epoch.\n",
    "  if valid_acc >= best_valid_acc:\n",
    "    best_valid_acc = valid_acc\n",
    "    best_model = copy.deepcopy(model)\n",
    "    print('best valid_acc', best_valid_acc * 100.)\n",
    "\n",
    "eval_acc = evaluate(best_model, test_loader)\n",
    "print('final test acc', eval_acc * 100.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Average loss: 0.0373, Accuracy: 9885/10000 (98.8500%)\n",
      "\n",
      "final test acc 98.85000000000001\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the final-epoch model (not best_model) on the test set.\n",
    "# NOTE(review): execution count 20 shows this cell was run out of order —\n",
    "# it will not reproduce in this position under Restart & Run All.\n",
    "eval_acc = evaluate(model, test_loader)\n",
    "print('final test acc', eval_acc * 100.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([8, 1, 5, 5])\n",
      "torch.Size([8])\n",
      "torch.Size([16, 8, 5, 5])\n",
      "torch.Size([16])\n",
      "torch.Size([10, 784])\n",
      "torch.Size([10])\n",
      "11274\n"
     ]
    }
   ],
   "source": [
    "# Print each parameter tensor's shape and the total parameter count.\n",
    "param_count = 0\n",
    "for param in model.parameters():\n",
    "  print(param.data.shape)\n",
    "  # np.prod replaces np.product, which was deprecated and removed in NumPy 2.0.\n",
    "  param_count += np.prod(param.data.shape)\n",
    "print(param_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Snapshot the untrained network's weights (orig_model was deep-copied\n",
    "# before training) as flat numpy arrays, one per parameter tensor.\n",
    "orig_params = []\n",
    "for param in orig_model.parameters():\n",
    "  orig_params.append(param.data.cpu().numpy().flatten())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Concatenate the per-parameter arrays into one 1-D vector of initial weights.\n",
    "orig_params_flat = np.concatenate(orig_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD8CAYAAAB5Pm/hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEexJREFUeJzt3X+IZWd9x/H3xySmRS1NmnFdN2snwrZlU2qUaZQqRWtr\nYvxjI5SwodiFpqxCFAX9Y6N/aJGFtFSFQpWuGtwWNV0wksWkLckiiKiJE4lJNjFmNRuyyya7/qr2\nn7RZv/1jTvQ6zsw9d+69OzPPvF9wmXOf8zznfOfM3c+cee65Z1NVSJLa9by1LkCSNF0GvSQ1zqCX\npMYZ9JLUOINekhpn0EtS4wx6SWqcQS9JjTPoJalx5691AQCXXHJJzc7OrnUZkrSh3HfffT+oqplh\n/dZF0M/OzjI/P7/WZUjShpLkiT79nLqRpMYZ9JLUOINekho3NOiT/EaSe5N8O8nRJH/XtV+c5K4k\nj3VfLxoYc1OSY0keTXLVNL8BSdLK+pzRPwP8WVW9ArgCuDrJa4B9wJGq2gEc6Z6TZCewG7gcuBr4\neJLzplG8JGm4oUFfC/6ne3pB9yhgF3Cwaz8IXNst7wJurapnqupx4Bhw5USrliT11muOPsl5Se4H\nTgN3VdU9wJaqOtV1eQrY0i1vA54cGH6ia5MkrYFeQV9VZ6vqCuBS4Mokf7hofbFwlt9bkr1J5pPM\nnzlzZpShkqQRjHTVTVX9BPgyC3PvTyfZCtB9Pd11OwlsHxh2ade2eFsHqmququZmZoZ+sEuStEp9\nrrqZSfLb3fJvAn8BfAc4DOzpuu0Bbu+WDwO7k1yY5DJgB3DvpAuXWjG77w5m992x1mWoYX1ugbAV\nONhdOfM84FBVfSnJ14FDSW4AngCuA6iqo0kOAQ8DzwI3VtXZ6ZQvSRpmaNBX1QPAK5do/yHwxmXG\n7Af2j12dJGlsfjJWOkecotFaMeglqXEGvTYNz6i1WRn0ktQ4g16bzrk8q/cvCK0HBr02JadxtJkY\n9GqG4S0tzaCXpMYZ9JLUOINekhpn0GtTmNTcve8BaCMy6LXhDHvT1TCWfpVBL3X6/oLw6h5tNAa9\nJDXOoJekxvX5j0ckjcipHa0nntFLa8hfCDoXPKOXejCQtZF5Ri8twWBXSwx6SWqcQS9JjXOOXpoy\np4G01jyjVxMMU2l5Br20jNXe6mDYGH8p6Vwz6CWpcQa9NjXPrrUZDA36JNuTfDnJw0mOJnl31/6h\nJCeT3N89rhkYc1OSY0keTXLVNL8BaSneYVL6pT5X3TwLvLeqvpXkRcB9Se7q1n2sqv5xsHOSncBu\n4HLgpcDdSX6vqs5OsnBpPZnddwfHb37LSP2lc2XoGX1Vnaqqb3XLPwMeAbatMGQXcGtVPVNVjwPH\ngCsnUawkaXQjzdEnmQVeCdzTNb0ryQNJbklyUde2DXhyYNgJlvjFkGRvkvkk82fOnBm5cGmteVau\njaJ30Cd5IfAF4D1V9VPgE8DLgSuAU8BHRtlxVR2oqrmqmpuZmRllqCRpBL2CPskFLIT8Z6vqNoCq\nerqqzlbVz4FP8svpmZPA9oHhl3ZtkqQ1MPTN2CQBPg08UlUfHWjfWlWnuqdvBR7qlg8Dn0vyURbe\njN0B3DvRqqUpcTpGLepz1c1rgbcBDya5v2t7P3B9kiuAAo4DbweoqqNJDgEPs3DFzo1ecSNJa2do\n0FfVV4EsserOFcbsB/aPUZe0IUzyL4BRL9GU+vLulWraJILY6RxtdN4CQZIaZ9Brw1pvZ9redkHr\nlVM32tAMVmk4z+glqXEGvSQ1zqkbbXpO/6h1ntFLUuMMeklqnEEvSY0z6CWpcQa9JDXOoJekxhn0\nktQ4g16SGmfQS1LjDHpJapxBL0mNM+glqXEGvSQ1zqCXpMYZ9JLUOINekhpn0EtS4wx6SWqcQS9J\njRsa9Em2J/lykoeTHE3y7q794iR3JX
ms+3rRwJibkhxL8miSq6b5DUiSVtbnjP5Z4L1VtRN4DXBj\nkp3APuBIVe0AjnTP6dbtBi4HrgY+nuS8aRQvSRpuaNBX1amq+la3/DPgEWAbsAs42HU7CFzbLe8C\nbq2qZ6rqceAYcOWkC5ck9TPSHH2SWeCVwD3Alqo61a16CtjSLW8DnhwYdqJrW7ytvUnmk8yfOXNm\nxLIlSX31DvokLwS+ALynqn46uK6qCqhRdlxVB6pqrqrmZmZmRhkqSRpBr6BPcgELIf/Zqrqta346\nydZu/VbgdNd+Etg+MPzSrk2StAb6XHUT4NPAI1X10YFVh4E93fIe4PaB9t1JLkxyGbADuHdyJUuS\nRnF+jz6vBd4GPJjk/q7t/cDNwKEkNwBPANcBVNXRJIeAh1m4YufGqjo78colSb0MDfqq+iqQZVa/\ncZkx+4H9Y9QlSZoQPxkrSY0z6CWpcQa9JDXOoJekxhn0ktQ4g16SGmfQS1LjDHpJapxBL0mNM+gl\nqXEGvSQ1zqCXpMYZ9JLUOINekhpn0EtS4wx6SWqcQS9JjTPoJalxBr0kNc6gl6TGGfSS1DiDXpIa\nZ9BLUuMMeklqnEEvSY0z6CWpcUODPsktSU4neWig7UNJTia5v3tcM7DupiTHkjya5KppFS5J6qfP\nGf1ngKuXaP9YVV3RPe4ESLIT2A1c3o35eJLzJlWsJGl0Q4O+qr4C/Kjn9nYBt1bVM1X1OHAMuHKM\n+iRJYxpnjv5dSR7opnYu6tq2AU8O9DnRtf2aJHuTzCeZP3PmzBhlSJJWstqg/wTwcuAK4BTwkVE3\nUFUHqmququZmZmZWWYYkaZhVBX1VPV1VZ6vq58An+eX0zElg+0DXS7s2SdIaWVXQJ9k68PStwHNX\n5BwGdie5MMllwA7g3vFKlCSN4/xhHZJ8Hng9cEmSE8AHgdcnuQIo4DjwdoCqOprkEPAw8CxwY1Wd\nnU7pkqQ+hgZ9VV2/RPOnV+i/H9g/TlGSpMnxk7GS1DiDXpIaZ9BLUuMMeklqnEEvSY0z6CWpcQa9\nJDXOoJekxhn0ktQ4g16SGmfQS1LjDHpJapxBL0mNM+glqXEGvSQ1zqCXpMYZ9JLUOINekhpn0EtS\n4wx6SWqcQS9JjTPoJalxBr0kNc6gl6TGGfSS1LihQZ/kliSnkzw00HZxkruSPNZ9vWhg3U1JjiV5\nNMlV0ypcktRPnzP6zwBXL2rbBxypqh3Ake45SXYCu4HLuzEfT3LexKqVJI1saNBX1VeAHy1q3gUc\n7JYPAtcOtN9aVc9U1ePAMeDKCdUqSVqF1c7Rb6mqU93yU8CWbnkb8ORAvxNdmyRpjYz9ZmxVFVCj\njkuyN8l8kvkzZ86MW4YkaRmrDfqnk2wF6L6e7tpPAtsH+l3atf2aqjpQVXNVNTczM7PKMiRJw6w2\n6A8De7rlPcDtA+27k1yY5DJgB3DveCVKksZx/rAOST4PvB64JMkJ4IPAzcChJDcATwDXAVTV0SSH\ngIeBZ4Ebq+rslGqXJPUwNOir6vplVr1xmf77gf3jFCVJmhw/GStJjTPoJalxBr0kNc6gl6TGGfSS\n1DiDXpIaZ9BLUuMMeklqnEEvSY0z6CWpcQa9JDXOoJekxhn0ktQ4g16SGmfQS1LjDHpJapxBL0mN\nM+glqXEGvSQ1zqDXOTe77461LkHaVAx6SWqcQS81wL+StBKDXhM3u+8Og0daRwx6SWqcQa8NYbP9\nlTCN7/W5Y7iZjqMWGPTaUAwpaXTnjzM4yXHgZ8BZ4NmqmktyMfDvwCxwHLiuqn48XpmSRvHcL8Tj\nN79ljSvRejCJM/o3VNUVVTXXPd8HHKmqHcCR7rkkaY1MY+pmF3CwWz4IXDuFfUiSehpr6gYo4O4k\nZ4F/qaoDwJaqOtWtfwrYMuY+JPWw3HSN72to3KB/XVWdTPJi4K4k3xlcWVWVpJYamGQvsBfgZS97\n2Z
hlSFqKIS8Yc+qmqk52X08DXwSuBJ5OshWg+3p6mbEHqmququZmZmbGKUPrlCEzGR5HjWvVQZ/k\nBUle9Nwy8CbgIeAwsKfrtge4fdwiJUmrN87UzRbgi0me287nquo/k3wTOJTkBuAJ4Lrxy9RG1Wfe\neLlLADfjJYKDx8UzeU3KqoO+qr4PvGKJ9h8CbxynKG0umzHQpXPJT8Zq3fAMVpoOg17aYIbdr8Zf\nmFps3MsrpV8YFjAG0GTN7rvD6S71YtBLG5i/PNWHUzfSBmKwazUMep0TowaUgSZNjlM3WlcM+KV5\nXDSOVC15K5pzam5urubn59e6DI3BINqYfDN3Y0ty38At4pfl1I0kNc6gl6TGGfSS1DiDXpIaZ9BL\nUuMMeklqnEEvSY0z6CWpcQa9JDXOoJekxhn0ktQ4g16SGmfQS1LjDHpJapxBL0mNM+glqXEGvSQ1\nzqCXpMZNLeiTXJ3k0STHkuyb1n4kSSubStAnOQ/4Z+DNwE7g+iQ7p7EvSdoI1vL/VZ7WGf2VwLGq\n+n5V/S9wK7BrSvv6tQM4u++OJdv6jO2zr6W2v9T2BvuuZv8rjetTSx/j7L9vH61fi1+vK/Vbav1q\nf/4r/Xvou70+/74Wb3OUvkvtZ9z+a+X8KW13G/DkwPMTwKuntC9gvN+WS409fvNbfqX9+M1vGXuf\ny21v2H4G+4yz/rl1g8uL97/autWGwdfRUq+pxa+dpdpHfa31fW0vrqtP7X0s9z2N0nel9pVqheW/\n70lKVU1+o8lfAldX1d92z98GvLqq3jnQZy+wt3v6+8Cjq9jVJcAPxix3WtZrbdY1uvVa23qtC9Zv\nba3V9btVNTOs07TO6E8C2weeX9q1/UJVHQAOjLOTJPNVNTfONqZlvdZmXaNbr7Wt17pg/da2Weua\n1hz9N4EdSS5L8nxgN3B4SvuSJK1gKmf0VfVskncC/wWcB9xSVUensS9J0sqmNXVDVd0J3Dmt7XfG\nmvqZsvVam3WNbr3Wtl7rgvVb26asaypvxkqS1g9vgSBJjVv3QZ/k4iR3JXms+3rREn22J/lykoeT\nHE3y7lHGT6uurt8tSU4neWhR+4eSnExyf/e4ZhJ1Tai2tT5mS94+Y9LHbNhtOrLgn7r1DyR5Vd+x\n4xqztuNJHuyO0fw5rusPknw9yTNJ3jfK2DWsa2rHq2dtf9X9DB9M8rUkr+g7treqWtcP4B+Afd3y\nPuDvl+izFXhVt/wi4LvAzr7jp1VXt+5PgVcBDy1q/xDwvrU6ZkNqW7NjxsKb998DXg48H/j2wM9y\nYsdspf0M9LkG+A8gwGuAe/qOXavaunXHgUum8LrqU9eLgT8G9g/+rKZ5zMapa5rHa4Ta/gS4qFt+\n8zReZ+v+jJ6FWycc7JYPAtcu7lBVp6rqW93yz4BHWPh0bq/x06qrq+crwI8mtM++xq1tLY/Zubp9\nRp/97AL+tRZ8A/jtJFvPQY3j1DZNQ+uqqtNV9U3g/0Ydu0Z1TVuf2r5WVT/unn6Dhc8d9Rrb10YI\n+i1VdapbfgrYslLnJLPAK4F7VjN+WnUt413dn2y3TGp6ZEK1reUxW+r2GdsGnk/qmA3bz0p9+owd\nxzi1ARRwd5L7svAJ9HNZ1zTGTnvb0zpeMHptN7Dwl9pqxi5rapdXjiLJ3cBLllj1gcEnVVVJlr1M\nKMkLgS8A76mqny5eP2z8tOpaxieAD7PwIvsw8BHgb9ZJbasev56P2Sbyuqo6meTFwF1JvtP99aal\nrYvjleQNLAT96ya97XUR9FX158utS/J0kq1Vdar70/T0Mv0uYCHkP1tVtw2s6jV+WnWtsO2nB7b1\nSeBLI46fWm2s7TFb9vYZ4x6zvvvp0eeCHmPHMU5tVNVzX08n+SILUwCTCK4+dU1j7FS3PcXj1bu2\nJH8EfAp4c1X9cJSxfWyEqZvDwJ5ueQ9w++IOSQJ8Gnikqj466vhp
1bWSRfOpbwUeWq7vKoz7Pa/l\nMVv29hkTPmZ9btNxGPjr7gqX1wD/3U09TfsWH6uuLckLkrwIIMkLgDcxudfWON/3NI/Zqrc95ePV\nq7YkLwNuA95WVd8dZWxv03ineZIP4HeAI8BjwN3AxV37S4E7u+XXsfDn/APA/d3jmpXGn4u6uuef\nB06x8CbQCeCGrv3fgAe7mg8DW8/lMRtS21ofs2tYuHLqe8AHBtonesyW2g/wDuAd3XJY+A90vtft\nd25YjRP8Ga6qNhau0Ph29zg66dp61PWS7rX0U+An3fJvTfuYrbauaR+vnrV9Cvgxv8yu+Um/zvxk\nrCQ1biNM3UiSxmDQS1LjDHpJapxBL0mNM+glqXEGvSQ1zqCXpMYZ9JLUuP8Hm+2DKUyf1DkAAAAA\nSUVORK5CYII=\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x7f238898fcf8>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Histogram (200 bins) of the flattened parameter vector — presumably the\n",
    "# pre-training snapshot, given the `orig_` prefix; confirm against the cell\n",
    "# that builds orig_params_flat. `_ =` suppresses the bin/count return value.\n",
    "_ = plt.hist(orig_params_flat, bins=200)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Flatten every parameter tensor of best_model into 1-D numpy arrays,\n",
    "# then join them into a single flat vector for histogramming below.\n",
    "final_params = [p.data.cpu().numpy().flatten()\n",
    "                for p in best_model.parameters()]\n",
    "final_params_flat = np.concatenate(final_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD8CAYAAAB5Pm/hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAE7pJREFUeJzt3W+MXNd93vHvE1qVgzpGpGjL0iTdlQG6AJXWVLBhDChA\nHduqZMkIZaAQKCQKX6iggyiuDBhIyBZoHBQEGCD+0wKVUdoWwjZOGCJ2KsJSkkqMgMBILHrlKopI\nRRERURAJStw4dm2/YUvq1xd7aQ+p3Z2Z3Zn9c/b7ARZ777nnzvz2cvnM2TP33klVIUlq14+sdAGS\npPEy6CWpcQa9JDXOoJekxhn0ktQ4g16SGmfQS1LjDHpJapxBL0mNe8ugHZNsAKaBc1X14SQ3An8A\nTAJngHur6ttd3/3AA8Bl4N9V1Z8u9Ng33XRTTU5OLqZ+SVq3nnnmmb+vqol+/QYOeuAh4AXg7d36\nPuB4VR1Msq9b//Uk24HdwC3AO4Ank7y7qi7P98CTk5NMT08PUYokKckrg/QbaOomyRbgbuALPc27\ngMPd8mHgnp72I1V1sapeBk4DOwd5HknS6A06R/9Z4NeAN3raNlbV+W75NWBjt7wZeLWn39mu7SpJ\n9iaZTjI9MzMzXNWSpIH1DfokHwYuVNUz8/Wp2VtgDnUbzKo6VFVTVTU1MdF3ikmStEiDzNHfBvx8\nkruAtwJvT/K7wOtJNlXV+SSbgAtd/3PA1p79t3RtkqQV0HdEX1X7q2pLVU0y+ybrn1XVLwLHgD1d\ntz3Ao93yMWB3kuuT3AxsA06MvHJJ0kCGOevmWgeBo0keAF4B7gWoqpNJjgKngEvAgwudcSNJGq+s\nhk+YmpqaKk+vlKThJHmmqqb69fPKWElqnEEvSY0z6KUhTO57bKVLkIZm0EtS4wx6SWqcQS9JjTPo\nJalxBr0kNc6glxbJM3C0Vhj0ktQ4g16SGmfQS1LjDHpJapxBL0mNM+glqXEGvSQ1zqCXpMb1Dfok\nb01yIslfJTmZ5De79k8mOZfk2e7rrp599ic5neTFJHeM8weQJC1skM+MvQi8v6q+n+Q64GtJ/rjb\n9pmq+u3ezkm2M/sh4rcA7wCeTPJuPzdWklZG3xF9zfp+t3pd97XQB83uAo5U1cWqehk4DexccqWS\npEUZaI4+yYYkzwIXgCeq6ulu08eSPJfkkSQ3dG2bgVd7dj/btUmSVsBAQV9Vl6tqB7AF2JnkJ4HP\nAe8CdgDngU8N88RJ9iaZTjI9MzMzZNmSpEENddZNVX0HeAq4s6pe714A3gA+zw+nZ84BW3t229K1\nXftYh6pqqqqmJiYmFle9tAy8S6XWur5vxiaZAP5fVX0nyY8CtwO/lWRTVZ3vun0EeL5bPgb8XpJP\nM/tm7DbgxOhLl8ZrvoA3+LXWDHLWzSbgcJINzP4FcLSqvprkfyTZwewbs2eAjwJU1ckkR4FTwCXg\nQc+4kaSV0zfoq+o54NY52u9fYJ8DwIGllSYtvyuj9TMH756zXVqLvDJWkhpn0EtS4wx6SWqcQS9J\njTPopRHwzVqtZga9JDXOoJekxg1ywZS07gw6FeOUjdYCR/SS1DiDXpIaZ9BLUuMMeklqnEEvSY0z\n6CWpcQa9NCKT+x7zdEutSga9JDXOoJekxhn0ktS4vkGf5K1JTiT5qyQnk/xm135jkieSvNR9v6Fn\nn/1JTid5Mckd4/wBJEkLG2REfxF4f1W9B9gB3JnkvcA+4HhVbQOOd+sk2Q7sBm4B7gQe7j5YXJK0\nAvoGfc36frd6XfdVwC7gcNd+GLinW94FHKmqi1X1MnAa2DnSqiVJAxtojj7JhiTPAheAJ6rqaWBj\nVZ3vurwGbOyWNwOv9ux+tmuTVjVPjVSrBgr6qrpcVTuALcDOJD95zfZidpQ/sCR7k0wnmZ6ZmRlm\nV0nSEIY666aqvgM8xezc++tJNgF03y
903c4BW3t229K1XftYh6pqqqqmJiYmFlO7tGp58ZRWk0HO\nuplI8uPd8o8CtwN/AxwD9nTd9gCPdsvHgN1Jrk9yM7ANODHqwiVJgxnkE6Y2AYe7M2d+BDhaVV9N\n8pfA0SQPAK8A9wJU1ckkR4FTwCXgwaq6PJ7ypdXHkbxWm75BX1XPAbfO0f4t4APz7HMAOLDk6iRJ\nS+aVsZLUOINekhpn0EtS4wx6SWqcQS9JjTPoJalxBr0kNc6gl6TGGfSS1DiDXuuSNx3TemLQS2Pk\ni4lWA4Nekhpn0EtS4wa5TbHUNKdX1DpH9JLUOINekhpn0Gtdc9pG64FBL0mNG+TDwbcmeSrJqSQn\nkzzUtX8yybkkz3Zfd/Xssz/J6SQvJrljnD+AJGlhg5x1cwn4RFV9M8mPAc8keaLb9pmq+u3ezkm2\nA7uBW4B3AE8mebcfEC5JK6PviL6qzlfVN7vl7wEvAJsX2GUXcKSqLlbVy8BpYOcoipUkDW+oOfok\nk8CtwNNd08eSPJfkkSQ3dG2bgVd7djvLwi8MkqQxGjjok7wN+DLw8ar6LvA54F3ADuA88KlhnjjJ\n3iTTSaZnZmaG2VWSNISBgj7JdcyG/Jeq6isAVfV6VV2uqjeAz/PD6ZlzwNae3bd0bVepqkNVNVVV\nUxMTE0v5GSRJCxjkrJsAXwReqKpP97Rv6un2EeD5bvkYsDvJ9UluBrYBJ0ZXsiRpGIOcdXMbcD/w\n10me7dr+PXBfkh1AAWeAjwJU1ckkR4FTzJ6x86Bn3Gg18SIprTd9g76qvgZkjk2PL7DPAeDAEuqS\nJI2IV8ZKUuO8TbHWjZWasrnyvGcO3r0izy85opekxhn0ktQ4g16SGmfQS8tkct9jntqpFWHQS8vM\nsNdyM+glqXEGvSQ1zqCXpMYZ9JLUOINekhpn0EtS4wx6SWqcQS9JjTPoJalxBr0kNc6g17rgbQe0\nng3y4eBbkzyV5FSSk0ke6tpvTPJEkpe67zf07LM/yekkLya5Y5w/gCRpYYOM6C8Bn6iq7cB7gQeT\nbAf2AcerahtwvFun27YbuAW4E3g4yYZxFC9J6q9v0FfV+ar6Zrf8PeAFYDOwCzjcdTsM3NMt7wKO\nVNXFqnoZOA3sHHXhkqTBDDVHn2QSuBV4GthYVee7Ta8BG7vlzcCrPbud7dokSStg4KBP8jbgy8DH\nq+q7vduqqoAa5omT7E0ynWR6ZmZmmF0lSUN4yyCdklzHbMh/qaq+0jW/nmRTVZ1Psgm40LWfA7b2\n7L6la7tKVR0CDgFMTU0N9SIhDcqzbaTBzroJ8EXghar6dM+mY8CebnkP8GhP++4k1ye5GdgGnBhd\nyZKkYQwydXMbcD/w/iTPdl93AQeB25O8BHywW6eqTgJHgVPAnwAPVtXlsVQvzcFRvHS1vlM3VfU1\nIPNs/sA8+xwADiyhLmldmNz3GGcO3r3SZahxXhkrSY0b6M1Yaa1Z7dM3q70+tcURvSQ1zqCXpMYZ\n9JLUOINekhpn0EtS4wx6SWqcQS9JjTPoJalxBr0kNc6gl6TGGfSS1DiDXpIaZ9BLK2xy32Pe5Exj\nZdBLUuMMeklqnEEvSY0b5MPBH0lyIcnzPW2fTHLums+QvbJtf5LTSV5Mcse4CpckDWaQEf3vAHfO\n0f6ZqtrRfT0OkGQ7sBu4pdvn4SQbRlWsJGl4fYO+qv4c+IcBH28XcKSqLlbVy8BpYOcS6pPWDc+8\n0bgsZY7+Y0me66Z2bujaNgOv9vQ527W9SZK9SaaTTM/MzCyhDEnSQhYb9J8D3gXsAM4Dnxr2Aarq\nUFVNVdXUxMTEIsuQJPWzqKCvqter6nJVvQF8nh9Oz5wDtvZ03dK1SWPltIc0v0UFfZJNPasfAa6c\nkXMM2J3k+iQ3A9uAE0srUZK0FG/p1yHJ7wPvA25Kchb4DeB9SXYABZwBPgpQVSeTHAVOAZeAB6vq\n8n
hKlyQNom/QV9V9czR/cYH+B4ADSylKkjQ6XhkrSY0z6CWpcQa9JDXOoJekxhn0ktS4vmfdSGuF\nF01Jc3NEL0mNM+i1ZjmClwZj0EtS4wx6aRWZ3PeYf6lo5Ax6SWqcQS9JjTPoJalxBr0kNc4LprSm\n+cal1J8jemkV8gVMo2TQS1Lj+gZ9kkeSXEjyfE/bjUmeSPJS9/2Gnm37k5xO8mKSO8ZVuCRpMIOM\n6H8HuPOatn3A8araBhzv1kmyHdgN3NLt83CSDSOrVpI0tL5BX1V/DvzDNc27gMPd8mHgnp72I1V1\nsapeBk4DO0dUqyRpERZ71s3GqjrfLb8GbOyWNwNf7+l3tmuTRsY3KqXhLPnN2KoqoIbdL8neJNNJ\npmdmZpZahtQc73ujUVls0L+eZBNA9/1C134O2NrTb0vX9iZVdaiqpqpqamJiYpFlSO0z7LVUiw36\nY8CebnkP8GhP++4k1ye5GdgGnFhaiZKkpeg7R5/k94H3ATclOQv8BnAQOJrkAeAV4F6AqjqZ5Chw\nCrgEPFhVl8dUuyRpAH2Dvqrum2fTB+bpfwA4sJSiJEmj45WxktQ4g15rim9MSsMz6CWpcd6mWGuC\nI3lp8RzRS1LjDHpJapxBL0mNM+glqXEGvSQ1zqCXpMZ5eqW0BvSeXnrm4N0rWInWIkf0ktQ4g16S\nGmfQa1XyStj+PEYalHP00hpjwGtYjuglqXEGvSQ1zqDXqjW57zGnKaQRWNIcfZIzwPeAy8ClqppK\nciPwB8AkcAa4t6q+vbQyJUmLNYoR/c9V1Y6qmurW9wHHq2obcLxbl+bkiF0av3FM3ewCDnfLh4F7\nxvAcWkd8MZif01saxFJPryzgySSXgf9WVYeAjVV1vtv+GrBxic+hdcLAksZjqUH/s1V1Lsk/AZ5I\n8je9G6uqktRcOybZC+wFeOc737nEMiRJ81lS0FfVue77hSR/BOwEXk+yqarOJ9kEXJhn30PAIYCp\nqak5Xwy0PjiSl8Zr0XP0Sf5xkh+7sgz8a+B54Biwp+u2B3h0qUVKWpgvllrIUkb0G4E/SnLlcX6v\nqv4kyTeAo0keAF4B7l16mWqNwTR6V46ptzHWtRYd9FX1d8B75mj/FvCBpRSlNhlE0srwylhJapxB\nL0mNM+glqXEGvSQ1zqDXsug9y8YzbqTlZdBLDfIeOOpl0EsNM+wFBr3GyJCRVgeDXmrMMC+wvhiv\nD0u9e6X0JobH6tL77+FVyeuTI3pJapwjeo3MXCN5R/eri/cbWp8c0WskDHRp9XJEr4E4EmzLXBew\n+W/bLkf0WjRH8W3x37Ndjug1r0H+4xsO7XKk3w5H9JJ+4MqtEwa5N5Ev8muHI/p1arHnVvufW1p7\nxhb0Se4E/jOwAfhCVR0c13NpaeYKb/9cV6/eaRxf7NeeVNXoHzTZAPwtcDtwFvgGcF9VnZqr/9TU\nVE1PT4+8Dv3Q5L7Hrgpv/7NqOfS+MFz7++dgYumSPFNVU/36jWtEvxM43X2AOEmOALuAOYNe/S32\njTHvA6+VdO3v31y/v4P8bg871egbyVcbV9BvBl7tWT8L/MyYnmuk9/Lo9wsyzEjk2mC9st9SRte9\n+8435WKga7VaaODR73d7ocea7y+HfvvMV8so/v/3s5wvRuOauvk3wJ1V9W+79fuBn6mqX+3psxfY\n263+c+DFkRcynJuAv1/hGlYbj8nVPB5X83i82XIfk39WVRP9Oo1rRH8O2NqzvqVr+4GqOgQcGtPz\nDy3J9CBzXeuJx+RqHo+reTzebLUek3GdR/8NYFuSm5P8I2A3cGxMzyVJWsBYRvRVdSnJrwJ/yuzp\nlY9U1clxPJckaWFjO4++qh4HHh/X44/BqplGWkU8JlfzeFzN4/Fmq/KYjOXNWEnS6uG9biSpces2\n6JPcmOSJJC91329YoO+GJP87yVeXs8blNsgxSbI1yVNJTiU5meSh
lah1nJLcmeTFJKeT7Jtje5L8\nl277c0l+aiXqXC4DHI9f6I7DXyf5iyTvWYk6l0u/49HT76eTXOpON19R6zbogX3A8araBhzv1ufz\nEPDCslS1sgY5JpeAT1TVduC9wINJti9jjWPV3b7jvwIfArYD983x830I2NZ97QU+t6xFLqMBj8fL\nwL+qqn8B/CdW6Tz1KAx4PK70+y3gfy1vhXNbz0G/CzjcLR8G7pmrU5ItwN3AF5aprpXU95hU1fmq\n+ma3/D1mXwA3L1uF4/eD23dU1f8Frty+o9cu4L/XrK8DP55k03IXukz6Ho+q+ouq+na3+nVmr5tp\n1SC/HwAfA74MXFjO4uaznoN+Y1Wd75ZfAzbO0++zwK8BbyxLVStr0GMCQJJJ4Fbg6fGWtazmun3H\ntS9kg/RpxbA/6wPAH4+1opXV93gk2Qx8hFX0l17T96NP8iTwT+fY9B96V6qqkrzp9KMkHwYuVNUz\nSd43niqX11KPSc/jvI3ZEcvHq+q7o61Sa1GSn2M26H92pWtZYZ8Ffr2q3kiy0rUAjQd9VX1wvm1J\nXk+yqarOd392z/Un1m3Azye5C3gr8PYkv1tVvzimksduBMeEJNcxG/JfqqqvjKnUldL39h0D9mnF\nQD9rkn/J7PTmh6rqW8tU20oY5HhMAUe6kL8JuCvJpar6n8tT4put56mbY8CebnkP8Oi1Hapqf1Vt\nqapJZm/j8GdrOeQH0PeYZPa394vAC1X16WWsbbkMcvuOY8AvdWffvBf4Pz1TXq3pezySvBP4CnB/\nVf3tCtS4nPoej6q6uaomu9z4Q+BXVjLkYX0H/UHg9iQvAR/s1knyjiRr6YreURrkmNwG3A+8P8mz\n3dddK1Pu6FXVJeDK7TteAI5W1ckkv5zkl7tujwN/B5wGPg/8yooUuwwGPB7/EfgJ4OHu96HZTxEa\n8HisOl4ZK0mNW88jeklaFwx6SWqcQS9JjTPoJalxBr0kNc6gl6TGGfSS1DiDXpIa9/8BIVZE8NI2\nBgUAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x7f23886307f0>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Histogram (200 bins) of the trained model's flattened parameters\n",
    "# (final_params_flat, built two cells above). `_ =` suppresses the\n",
    "# bin/count tuple that plt.hist returns.\n",
    "_ = plt.hist(final_params_flat, bins=200)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
