{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "%matplotlib inline\n",
    "import os\n",
    "import sys\n",
    "\n",
    "# Make the repo root (N_up directories above the notebook dir) importable.\n",
    "# os.path-based join is portable, unlike splitting the cwd on '/'.\n",
    "N_up = 1\n",
    "nb_dir = os.path.abspath(os.path.join(os.getcwd(), *(['..'] * N_up)))\n",
    "if nb_dir not in sys.path:\n",
    "    sys.path.append(nb_dir)\n",
    "\n",
    "# Matplotlib default colour cycle (tab10)\n",
    "c = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',\n",
    "     '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n",
    "\n",
    "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\""
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Setup plotting enviroment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import print_function, division\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "from torch import nn, optim\n",
    "from torch.autograd import Variable\n",
    "from torch.optim import Optimizer\n",
    "\n",
    "import collections\n",
    "import sys\n",
    "import gzip\n",
    "import os\n",
    "import math\n",
    "\n",
    "import time\n",
    "import torch.utils.data\n",
    "from torchvision import transforms, datasets\n",
    "import scipy.ndimage as ndim\n",
    "\n",
    "# cPickle only exists on Python 2; catch ImportError specifically rather\n",
    "# than a bare except (which would also swallow KeyboardInterrupt).\n",
    "try:\n",
    "    import cPickle as pickle\n",
    "except ImportError:\n",
    "    import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "\n",
    "# Third-party\n",
    "import numpy as np\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Project-local\n",
    "from src.utils import Datafeed\n",
    "from src.datasets import make_spirals\n",
    "from src.probability import variational_categorical, fixed_probs\n",
    "from src.plots import evaluate_train_test, errorfill, evaluate_train_test_at_d\n",
    "from net_wrappers import MF_BNN_cat\n",
    "from train_BNN import train_VI_classification\n",
    "from stochastic_resnet_models import arq_uncert_conv2d_resnet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "text_width = 5.50107  # in --> confirmed with template explanation\n",
    "\n",
    "import matplotlib\n",
    "# NOTE: no matplotlib.use('agg') here -- the notebook already selected the\n",
    "# inline backend via %matplotlib inline, and switching to agg afterwards\n",
    "# would stop figures from rendering in the notebook.\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Font sizes: small / base / large\n",
    "fs_m1 = 6\n",
    "fs = 8\n",
    "fs_p1 = 8\n",
    "\n",
    "matplotlib.rc('font', serif='Times')\n",
    "matplotlib.rc('font', size=fs)            # default text sizes\n",
    "matplotlib.rc('axes', titlesize=fs)       # axes title\n",
    "matplotlib.rc('axes', labelsize=fs)       # x and y labels\n",
    "matplotlib.rc('xtick', labelsize=fs_m1)   # tick labels\n",
    "matplotlib.rc('ytick', labelsize=fs_m1)   # tick labels\n",
    "matplotlib.rc('legend', fontsize=fs)      # legend\n",
    "matplotlib.rc('figure', titlesize=fs_p1)  # figure title\n",
    "\n",
    "matplotlib.rc('font', **{'family': 'serif', 'serif': ['Palatino']})\n",
    "\n",
    "# 'text.latex.preamble' must be a single string on matplotlib >= 3.3, and\n",
    "# assigning it twice discards the first value -- so the siunitx/helvet/\n",
    "# sansmath setup and amsmath all go in one combined preamble.\n",
    "matplotlib.rcParams['text.latex.preamble'] = '\\n'.join([\n",
    "    r'\\usepackage{siunitx}',   # upright \\micro symbols\n",
    "    r'\\sisetup{detect-all}',   # force siunitx to use the fonts configured above\n",
    "    r'\\usepackage{helvet}',    # set the normal font\n",
    "    r'\\usepackage{sansmath}',  # math -> helvet\n",
    "    r'\\sansmath',\n",
    "    r'\\usepackage{amsmath}',\n",
    "])\n",
    "matplotlib.rc('text', usetex=True)\n",
    "\n",
    "base_c10 = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',\n",
    "            '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n",
    "\n",
    "base_c11k = base_c10 + ['#000000']  # same palette with black appended\n",
    "\n",
    "i = range(len(base_c10))\n",
    "a = [5] * len(base_c10)\n",
    "\n",
    "plt.figure(dpi=80)\n",
    "plt.bar(i, a, color=base_c10)\n",
    "plt.title('Test')\n",
    "plt.xlabel('test')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from src.plots import errorfill\n",
    "\n",
    "# Where paper figures and their underlying data are written\n",
    "plot_savedir = '../saves/paper_plots/'\n",
    "datadir = plot_savedir + 'data/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Root directory holding the experiment logs\n",
    "save_dir = '../saves/logs/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_run_mean_std(struct, last_max=False, last_min=False, last_argmax=False, last_argmin=False):\n",
    "    \"\"\"Compute per-experiment NaN-ignoring means and stds across runs.\n",
    "\n",
    "    Args:\n",
    "        struct: iterable of arrays, each shaped (n_runs, ...).\n",
    "        last_max / last_min / last_argmax / last_argmin: if set, first\n",
    "            reduce each array over axis 1 with the corresponding\n",
    "            operation. Only one flag is honoured, in the order listed.\n",
    "\n",
    "    Returns:\n",
    "        (means, stds): arrays with one entry per element of `struct`,\n",
    "        reduced over axis 0 (the runs) with nanmean / nanstd.\n",
    "    \"\"\"\n",
    "    means = []\n",
    "    stds = []\n",
    "    for exp in struct:\n",
    "        if last_max:\n",
    "            exp = exp.max(axis=1)\n",
    "        elif last_min:\n",
    "            exp = exp.min(axis=1)\n",
    "        elif last_argmax:\n",
    "            exp = exp.argmax(axis=1)\n",
    "        elif last_argmin:\n",
    "            exp = exp.argmin(axis=1)\n",
    "\n",
    "        means.append(np.nanmean(exp, axis=0))\n",
    "        stds.append(np.nanstd(exp, axis=0))\n",
    "\n",
    "    return np.array(means), np.array(stds)"
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Generate plot data: scan over depth - MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Logs for the MNIST max-depth scan\n",
    "experiment_dir = os.path.join(save_dir, 'CNN_BNN_MNIST_cat', 'max_depth_scan/')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# Architecture hyperparameters for the MNIST resnet\n",
    "input_chan = 1   # single-channel (greyscale) images\n",
    "output_dim = 10  # 10 classes\n",
    "outer_width = 64\n",
    "inner_width = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "\n",
    "trainset = datasets.MNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.MNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only helps when batches are moved to a CUDA device, so key it\n",
    "# off `cuda` directly instead of duplicating the loader construction.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,\n",
    "                                          pin_memory=cuda, num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False,\n",
    "                                        pin_memory=cuda, num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_depths_MNIST = []\n",
    "per_max_depth_d_exp_MNIST = []\n",
    "per_max_depth_d_95th_MNIST = []\n",
    "per_max_depth_q_MNIST = []\n",
    "per_max_depth_KL_MNIST = []\n",
    "\n",
    "# per_max_depth_train_loglike_MNIST = []\n",
    "# per_max_depth_train_err_MNIST = []\n",
    "per_max_depth_test_loglike_MNIST = []\n",
    "per_max_depth_test_err_MNIST = []\n",
    "\n",
    "# per_max_depth_train_loglike_d_exp_MNIST = []\n",
    "# per_max_depth_train_err_d_exp_MNIST = []\n",
    "per_max_depth_test_loglike_d_exp_MNIST = []\n",
    "per_max_depth_test_err_d_exp_MNIST = []\n",
    "\n",
    "# per_max_depth_train_loglike_d_95th_MNIST = []\n",
    "# per_max_depth_train_err_d_95th_MNIST = []\n",
    "per_max_depth_test_loglike_d_95th_MNIST = []\n",
    "per_max_depth_test_err_d_95th_MNIST = []\n",
    "\n",
    "#iterate over experiment settings\n",
    "exp_folders = os.listdir(experiment_dir)\n",
    "for exp_folder in exp_folders: # loop through all the files and folders\n",
    "    full_exp_folder = os.path.join(experiment_dir, exp_folder)\n",
    "    if os.path.isdir(full_exp_folder): # check whether the current object is a folder or not\n",
    "        \n",
    "        if exp_folder[0] == '.':\n",
    "            continue\n",
    "        \n",
    "        print(int(exp_folder))\n",
    "    \n",
    "        prior_probs = 0.85 ** (1 + np.arange(int(exp_folder) + 1))\n",
    "        prior_probs = prior_probs / prior_probs.sum()\n",
    "        prob_model = variational_categorical(int(exp_folder), prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                     inner_width, int(exp_folder), prob_model)  \n",
    "        N_train = len(trainset)\n",
    "        lr = 1e-1\n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "        max_depths_MNIST.append(int(exp_folder))\n",
    "        \n",
    "        per_max_depth_q_MNIST.append([])\n",
    "        per_max_depth_KL_MNIST.append([])\n",
    "        per_max_depth_d_exp_MNIST.append([])\n",
    "        per_max_depth_d_95th_MNIST.append([])\n",
    "        # per_max_depth_train_loglike_MNIST.append([])\n",
    "        # per_max_depth_train_err_MNIST.append([])\n",
    "        per_max_depth_test_loglike_MNIST.append([])\n",
    "        per_max_depth_test_err_MNIST.append([])\n",
    "        \n",
    "        # per_max_depth_train_loglike_d_exp_MNIST.append([])\n",
    "        # per_max_depth_train_err_d_exp_MNIST.append([])\n",
    "        per_max_depth_test_loglike_d_exp_MNIST.append([])\n",
    "        per_max_depth_test_err_d_exp_MNIST.append([])\n",
    "\n",
    "        # per_max_depth_train_loglike_d_95th_MNIST.append([])\n",
    "        # per_max_depth_train_err_d_95th_MNIST.append([])\n",
    "        per_max_depth_test_loglike_d_95th_MNIST.append([])\n",
    "        per_max_depth_test_err_d_95th_MNIST.append([])\n",
    "        \n",
    "        N_run_folders = os.listdir(full_exp_folder)\n",
    "        for N_run_folder in N_run_folders: # loop through all the files and folders\n",
    "            model_folder = os.path.join(full_exp_folder, N_run_folder, 'models')\n",
    "            if os.path.isdir(model_folder): # check whether the current object is a folder or not\n",
    "                \n",
    "                try:\n",
    "                    net.load(model_folder + '/theta_best.dat')\n",
    "                except:\n",
    "                    print('could not load: ' + model_folder)\n",
    "                    continue\n",
    "                \n",
    "                per_max_depth_q_MNIST[-1].append(net.model.prob_model.get_q_probs().data.cpu().numpy())\n",
    "                per_max_depth_KL_MNIST[-1].append(net.model.get_KL().item())\n",
    "                \n",
    "                loglike_tests, err_tests, loglike_tests_exp, err_tests_exp, loglike_tests_95th, err_tests_95th = [], [], [], [], [], []\n",
    "        \n",
    "                probs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "                cuttoff = np.max(probs)*0.95\n",
    "                depth_exp = np.sum(probs * np.arange(net.model.n_layers + 1))\n",
    "                depth_95th = np.argmax(probs > cuttoff)\n",
    "                per_max_depth_d_exp_MNIST[-1].append(depth_exp)\n",
    "                per_max_depth_d_95th_MNIST[-1].append(depth_95th)\n",
    "\n",
    "                for x, y in valloader:\n",
    "                    if cuda:\n",
    "                        y = y.cuda()\n",
    "\n",
    "                    probs = net.sample_predict(x).sum(dim=0)\n",
    "                    loglike = -F.nll_loss(probs, y, reduction='sum').item()\n",
    "                    pred = probs.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err = pred.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests.append(loglike)\n",
    "                    err_tests.append(err)\n",
    "\n",
    "                    probs_exp = net.partial_predict(x, int(round(depth_exp))).sum(dim=0)\n",
    "                    loglike_exp = -F.nll_loss(probs_exp, y, reduction='sum').item()\n",
    "                    pred_exp = probs_exp.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err_exp = pred_exp.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests_exp.append(loglike_exp)\n",
    "                    err_tests_exp.append(err_exp)\n",
    "\n",
    "                    probs_95th = net.partial_predict(x, depth_95th).sum(dim=0)\n",
    "                    loglike_95th = -F.nll_loss(probs_95th, y, reduction='sum').item()\n",
    "                    pred_95th = probs_95th.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err_95th = pred_95th.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests_95th.append(loglike_95th)\n",
    "                    err_tests_95th.append(err_95th)\n",
    "\n",
    "                N_test = len(valset)\n",
    "                loglike_test = np.sum(loglike_tests) / N_test\n",
    "                err_test= np.sum(err_tests) / N_test\n",
    "                loglike_test_exp= np.sum(loglike_tests_exp) / N_test\n",
    "                err_test_exp = np.sum(err_tests_exp) / N_test\n",
    "                loglike_test_95th = np.sum(loglike_tests_95th) / N_test\n",
    "                err_test_95th = np.sum(err_tests_95th) / N_test\n",
    "                    \n",
    "                # per_max_depth_train_loglike_MNIST[-1].append(loglike_train)\n",
    "                # per_max_depth_train_err_MNIST[-1].append(err_train)\n",
    "                per_max_depth_test_loglike_MNIST[-1].append(loglike_test)\n",
    "                per_max_depth_test_err_MNIST[-1].append(err_test)\n",
    "                # per_max_depth_train_loglike_d_exp_MNIST[-1].append(loglike_train_exp)\n",
    "                # per_max_depth_train_err_d_exp_MNIST[-1].append(err_train_exp)\n",
    "                per_max_depth_test_loglike_d_exp_MNIST[-1].append(loglike_test_exp)\n",
    "                per_max_depth_test_err_d_exp_MNIST[-1].append(err_test_exp)\n",
    "                # per_max_depth_train_loglike_d_95th_MNIST[-1].append(loglike_train_95th)\n",
    "                # per_max_depth_train_err_d_95th_MNIST[-1].append(err_train_95th)\n",
    "                per_max_depth_test_loglike_d_95th_MNIST[-1].append(loglike_test_95th)\n",
    "                per_max_depth_test_err_d_95th_MNIST[-1].append(err_test_95th)\n",
    "                \n",
    "        per_max_depth_q_MNIST[-1] = np.stack(per_max_depth_q_MNIST[-1], axis=0)\n",
    "        per_max_depth_KL_MNIST[-1] = np.array(per_max_depth_KL_MNIST[-1])\n",
    "        # per_max_depth_train_loglike_MNIST[-1] = np.array(per_max_depth_train_loglike_MNIST[-1])\n",
    "        # per_max_depth_train_err_MNIST[-1] = np.array(per_max_depth_train_err_MNIST[-1])\n",
    "        per_max_depth_test_loglike_MNIST[-1] = np.array(per_max_depth_test_loglike_MNIST[-1])\n",
    "        per_max_depth_test_err_MNIST[-1] = np.array(per_max_depth_test_err_MNIST[-1])\n",
    "        \n",
    "        # per_max_depth_train_loglike_d_MNIST[-1] = np.array(per_max_depth_train_loglike_d_MNIST[-1])\n",
    "        # per_max_depth_train_err_d_MNIST[-1] = np.array(per_max_depth_train_err_d_MNIST[-1])\n",
    "        per_max_depth_test_loglike_d_exp_MNIST[-1] = np.array(per_max_depth_test_loglike_d_exp_MNIST[-1])\n",
    "        per_max_depth_test_err_d_exp_MNIST[-1] = np.array(per_max_depth_test_err_d_exp_MNIST[-1])\n",
    "\n",
    "        # per_max_depth_train_loglike_d_MNIST[-1] = np.array(per_max_depth_train_loglike_d_MNIST[-1])\n",
    "        # per_max_depth_train_err_d_MNIST[-1] = np.array(per_max_depth_train_err_d_MNIST[-1])\n",
    "        per_max_depth_test_loglike_d_95th_MNIST[-1] = np.array(per_max_depth_test_loglike_d_95th_MNIST[-1])\n",
    "        per_max_depth_test_err_d_95th_MNIST[-1] = np.array(per_max_depth_test_err_d_95th_MNIST[-1])     \n",
    "                       \n",
    "sort_idxs = np.argsort(max_depths_MNIST)\n",
    "\n",
    "max_depths_MNIST = np.array(max_depths_MNIST)[sort_idxs]\n",
    "\n",
    "per_max_depth_q_MNIST = [per_max_depth_q_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_KL_MNIST = [per_max_depth_KL_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_d_exp_MNIST = [per_max_depth_d_exp_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_d_95th_MNIST = [per_max_depth_d_95th_MNIST[i] for i in sort_idxs]\n",
    "\n",
    "# per_max_depth_train_loglike_MNIST = [per_max_depth_train_loglike_MNIST[i] for i in sort_idxs]\n",
    "# per_max_depth_train_err_MNIST = [per_max_depth_train_err_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_loglike_MNIST = [per_max_depth_test_loglike_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_err_MNIST = [per_max_depth_test_err_MNIST[i] for i in sort_idxs]\n",
    "\n",
    "# per_max_depth_train_loglike_d_exp_MNIST = [per_max_depth_train_loglike_d_exp_MNIST[i] for i in sort_idxs]\n",
    "# per_max_depth_train_err_d_exp_MNIST = [per_max_depth_train_err_d_exp_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_loglike_d_exp_MNIST = [per_max_depth_test_loglike_d_exp_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_err_d_exp_MNIST = [per_max_depth_test_err_d_exp_MNIST[i] for i in sort_idxs]\n",
    "\n",
    "# per_max_depth_train_loglike_d_95th_MNIST = [per_max_depth_train_loglike_d_95th_MNIST[i] for i in sort_idxs]\n",
    "# per_max_depth_train_err_d_95th_MNIST = [per_max_depth_train_err_d_95th_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_loglike_d_95th_MNIST = [per_max_depth_test_loglike_d_95th_MNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_err_d_95th_MNIST = [per_max_depth_test_err_d_95th_MNIST[i] for i in sort_idxs]    "
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Generate plot data: scan over depth - SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Logs for the SVHN max-depth scan\n",
    "experiment_dir = os.path.join(save_dir, 'CNN_BNN_SVHN_cat', 'max_depth_scan/')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# Architecture hyperparameters for the SVHN resnet\n",
    "input_chan = 3   # RGB images\n",
    "output_dim = 10  # 10 digit classes\n",
    "outer_width = 64\n",
    "inner_width = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "\n",
    "trainset = datasets.SVHN(root='../data', split=\"train\", download=True, transform=transform_train)\n",
    "valset = datasets.SVHN(root='../data', split=\"test\", download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only helps when batches are moved to a CUDA device, so key it\n",
    "# off `cuda` directly instead of duplicating the loader construction.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,\n",
    "                                          pin_memory=cuda, num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False,\n",
    "                                        pin_memory=cuda, num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_depths_SVHN = []\n",
    "per_max_depth_d_exp_SVHN = []\n",
    "per_max_depth_d_95th_SVHN = []\n",
    "per_max_depth_q_SVHN = []\n",
    "per_max_depth_KL_SVHN = []\n",
    "\n",
    "# per_max_depth_train_loglike_SVHN = []\n",
    "# per_max_depth_train_err_SVHN = []\n",
    "per_max_depth_test_loglike_SVHN = []\n",
    "per_max_depth_test_err_SVHN = []\n",
    "\n",
    "# per_max_depth_train_loglike_d_exp_SVHN = []\n",
    "# per_max_depth_train_err_d_exp_SVHN = []\n",
    "per_max_depth_test_loglike_d_exp_SVHN = []\n",
    "per_max_depth_test_err_d_exp_SVHN = []\n",
    "\n",
    "# per_max_depth_train_loglike_d_95th_SVHN = []\n",
    "# per_max_depth_train_err_d_95th_SVHN = []\n",
    "per_max_depth_test_loglike_d_95th_SVHN = []\n",
    "per_max_depth_test_err_d_95th_SVHN = []\n",
    "\n",
    "#iterate over experiment settings\n",
    "exp_folders = os.listdir(experiment_dir)\n",
    "for exp_folder in exp_folders: # loop through all the files and folders\n",
    "    full_exp_folder = os.path.join(experiment_dir, exp_folder)\n",
    "    if os.path.isdir(full_exp_folder): # check whether the current object is a folder or not\n",
    "        \n",
    "        if exp_folder[0] == '.':\n",
    "            continue\n",
    "        \n",
    "        print(int(exp_folder))\n",
    "    \n",
    "        prior_probs = 0.85 ** (1 + np.arange(int(exp_folder) + 1))\n",
    "        prior_probs = prior_probs / prior_probs.sum()\n",
    "        prob_model = variational_categorical(int(exp_folder), prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                     inner_width, int(exp_folder), prob_model)  \n",
    "        N_train = len(trainset)\n",
    "        lr = 1e-1\n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "        max_depths_SVHN.append(int(exp_folder))\n",
    "        \n",
    "        per_max_depth_q_SVHN.append([])\n",
    "        per_max_depth_KL_SVHN.append([])\n",
    "        per_max_depth_d_exp_SVHN.append([])\n",
    "        per_max_depth_d_95th_SVHN.append([])\n",
    "        # per_max_depth_train_loglike_SVHN.append([])\n",
    "        # per_max_depth_train_err_SVHN.append([])\n",
    "        per_max_depth_test_loglike_SVHN.append([])\n",
    "        per_max_depth_test_err_SVHN.append([])\n",
    "        \n",
    "        # per_max_depth_train_loglike_d_exp_SVHN.append([])\n",
    "        # per_max_depth_train_err_d_exp_SVHN.append([])\n",
    "        per_max_depth_test_loglike_d_exp_SVHN.append([])\n",
    "        per_max_depth_test_err_d_exp_SVHN.append([])\n",
    "\n",
    "        # per_max_depth_train_loglike_d_95th_SVHN.append([])\n",
    "        # per_max_depth_train_err_d_95th_SVHN.append([])\n",
    "        per_max_depth_test_loglike_d_95th_SVHN.append([])\n",
    "        per_max_depth_test_err_d_95th_SVHN.append([])\n",
    "        \n",
    "        N_run_folders = os.listdir(full_exp_folder)\n",
    "        for N_run_folder in N_run_folders: # loop through all the files and folders\n",
    "            model_folder = os.path.join(full_exp_folder, N_run_folder, 'models')\n",
    "            if os.path.isdir(model_folder): # check whether the current object is a folder or not\n",
    "                \n",
    "                try:\n",
    "                    net.load(model_folder + '/theta_best.dat')\n",
    "                except:\n",
    "                    print('could not load: ' + model_folder)\n",
    "                    continue\n",
    "                \n",
    "                per_max_depth_q_SVHN[-1].append(net.model.prob_model.get_q_probs().data.cpu().numpy())\n",
    "                per_max_depth_KL_SVHN[-1].append(net.model.get_KL().item())\n",
    "                \n",
    "                loglike_tests, err_tests, loglike_tests_exp, err_tests_exp, loglike_tests_95th, err_tests_95th = [], [], [], [], [], []\n",
    "        \n",
    "                probs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "                cuttoff = np.max(probs)*0.95\n",
    "                depth_exp = np.sum(probs * np.arange(net.model.n_layers + 1))\n",
    "                depth_95th = np.argmax(probs > cuttoff)\n",
    "                per_max_depth_d_exp_SVHN[-1].append(depth_exp)\n",
    "                per_max_depth_d_95th_SVHN[-1].append(depth_95th)\n",
    "\n",
    "                for x, y in valloader:\n",
    "                    if cuda:\n",
    "                        y = y.cuda()\n",
    "\n",
    "                    probs = net.sample_predict(x).sum(dim=0)\n",
    "                    loglike = -F.nll_loss(probs, y, reduction='sum').item()\n",
    "                    pred = probs.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err = pred.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests.append(loglike)\n",
    "                    err_tests.append(err)\n",
    "\n",
    "                    probs_exp = net.partial_predict(x, int(round(depth_exp))).sum(dim=0)\n",
    "                    loglike_exp = -F.nll_loss(probs_exp, y, reduction='sum').item()\n",
    "                    pred_exp = probs_exp.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err_exp = pred_exp.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests_exp.append(loglike_exp)\n",
    "                    err_tests_exp.append(err_exp)\n",
    "\n",
    "                    probs_95th = net.partial_predict(x, depth_95th).sum(dim=0)\n",
    "                    loglike_95th = -F.nll_loss(probs_95th, y, reduction='sum').item()\n",
    "                    pred_95th = probs_95th.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err_95th = pred_95th.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests_95th.append(loglike_95th)\n",
    "                    err_tests_95th.append(err_95th)\n",
    "\n",
    "                N_test = len(valset)\n",
    "                loglike_test = np.sum(loglike_tests) / N_test\n",
    "                err_test= np.sum(err_tests) / N_test\n",
    "                loglike_test_exp= np.sum(loglike_tests_exp) / N_test\n",
    "                err_test_exp = np.sum(err_tests_exp) / N_test\n",
    "                loglike_test_95th = np.sum(loglike_tests_95th) / N_test\n",
    "                err_test_95th = np.sum(err_tests_95th) / N_test\n",
    "                    \n",
    "                # per_max_depth_train_loglike_SVHN[-1].append(loglike_train)\n",
    "                # per_max_depth_train_err_SVHN[-1].append(err_train)\n",
    "                per_max_depth_test_loglike_SVHN[-1].append(loglike_test)\n",
    "                per_max_depth_test_err_SVHN[-1].append(err_test)\n",
    "                # per_max_depth_train_loglike_d_exp_SVHN[-1].append(loglike_train_exp)\n",
    "                # per_max_depth_train_err_d_exp_SVHN[-1].append(err_train_exp)\n",
    "                per_max_depth_test_loglike_d_exp_SVHN[-1].append(loglike_test_exp)\n",
    "                per_max_depth_test_err_d_exp_SVHN[-1].append(err_test_exp)\n",
    "                # per_max_depth_train_loglike_d_95th_SVHN[-1].append(loglike_train_95th)\n",
    "                # per_max_depth_train_err_d_95th_SVHN[-1].append(err_train_95th)\n",
    "                per_max_depth_test_loglike_d_95th_SVHN[-1].append(loglike_test_95th)\n",
    "                per_max_depth_test_err_d_95th_SVHN[-1].append(err_test_95th)\n",
    "                \n",
    "        per_max_depth_q_SVHN[-1] = np.stack(per_max_depth_q_SVHN[-1], axis=0)\n",
    "        per_max_depth_KL_SVHN[-1] = np.array(per_max_depth_KL_SVHN[-1])\n",
    "        # per_max_depth_train_loglike_SVHN[-1] = np.array(per_max_depth_train_loglike_SVHN[-1])\n",
    "        # per_max_depth_train_err_SVHN[-1] = np.array(per_max_depth_train_err_SVHN[-1])\n",
    "        per_max_depth_test_loglike_SVHN[-1] = np.array(per_max_depth_test_loglike_SVHN[-1])\n",
    "        per_max_depth_test_err_SVHN[-1] = np.array(per_max_depth_test_err_SVHN[-1])\n",
    "        \n",
    "        # per_max_depth_train_loglike_d_SVHN[-1] = np.array(per_max_depth_train_loglike_d_SVHN[-1])\n",
    "        # per_max_depth_train_err_d_SVHN[-1] = np.array(per_max_depth_train_err_d_SVHN[-1])\n",
    "        per_max_depth_test_loglike_d_exp_SVHN[-1] = np.array(per_max_depth_test_loglike_d_exp_SVHN[-1])\n",
    "        per_max_depth_test_err_d_exp_SVHN[-1] = np.array(per_max_depth_test_err_d_exp_SVHN[-1])\n",
    "\n",
    "        # per_max_depth_train_loglike_d_SVHN[-1] = np.array(per_max_depth_train_loglike_d_SVHN[-1])\n",
    "        # per_max_depth_train_err_d_SVHN[-1] = np.array(per_max_depth_train_err_d_SVHN[-1])\n",
    "        per_max_depth_test_loglike_d_95th_SVHN[-1] = np.array(per_max_depth_test_loglike_d_95th_SVHN[-1])\n",
    "        per_max_depth_test_err_d_95th_SVHN[-1] = np.array(per_max_depth_test_err_d_95th_SVHN[-1])   \n",
    "                       \n",
    "sort_idxs_SVHN = np.argsort(max_depths_SVHN)\n",
    "\n",
    "max_depths_SVHN = np.array(max_depths_SVHN)[sort_idxs_SVHN]\n",
    "\n",
    "per_max_depth_q_SVHN = [per_max_depth_q_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_KL_SVHN = [per_max_depth_KL_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_d_exp_SVHN = [per_max_depth_d_exp_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_d_95th_SVHN = [per_max_depth_d_95th_SVHN[i] for i in sort_idxs_SVHN]\n",
    "\n",
    "# per_max_depth_train_loglike_SVHN = [per_max_depth_train_loglike_SVHN[i] for i in sort_idxs_SVHN]\n",
    "# per_max_depth_train_err_SVHN = [per_max_depth_train_err_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_test_loglike_SVHN = [per_max_depth_test_loglike_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_test_err_SVHN = [per_max_depth_test_err_SVHN[i] for i in sort_idxs_SVHN]\n",
    "\n",
    "\n",
    "# per_max_depth_train_loglike_d_exp_SVHN = [per_max_depth_train_loglike_d_exp_SVHN[i] for i in sort_idxs]\n",
    "# per_max_depth_train_err_d_exp_SVHN = [per_max_depth_train_err_d_exp_SVHN[i] for i in sort_idxs]\n",
    "per_max_depth_test_loglike_d_exp_SVHN = [per_max_depth_test_loglike_d_exp_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_test_err_d_exp_SVHN = [per_max_depth_test_err_d_exp_SVHN[i] for i in sort_idxs_SVHN]\n",
    "\n",
    "# per_max_depth_train_loglike_d_95th_SVHN = [per_max_depth_train_loglike_d_95th_SVHN[i] for i in sort_idxs_SVHN]\n",
    "# per_max_depth_train_err_d_95th_SVHN = [per_max_depth_train_err_d_95th_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_test_loglike_d_95th_SVHN = [per_max_depth_test_loglike_d_95th_SVHN[i] for i in sort_idxs_SVHN]\n",
    "per_max_depth_test_err_d_95th_SVHN = [per_max_depth_test_err_d_95th_SVHN[i] for i in sort_idxs_SVHN]       "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data: scan over depth - FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory holding the saved FashionMNIST max-depth scan runs\n",
    "# (one sub-folder per maximum depth; `save_dir` is defined earlier).\n",
    "experiment_dir = save_dir + 'CNN_BNN_FashionMNIST_cat/' + 'max_depth_scan/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# Architecture hyperparameters for FashionMNIST.\n",
    "input_chan = 1   # greyscale images\n",
    "output_dim = 10  # number of classes\n",
    "outer_width = 64\n",
    "inner_width = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# FashionMNIST loaders. Train and test use the same deterministic\n",
    "# normalisation (no augmentation), so one transform serves both.\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.2860,), std=(0.3530,))\n",
    "])\n",
    "transform_test = transform_train\n",
    "\n",
    "trainset = datasets.FashionMNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.FashionMNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only speeds up host-to-GPU transfers, so the former duplicated\n",
    "# if/else branches collapse to pin_memory=cuda.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,\n",
    "                                          pin_memory=cuda, num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False,\n",
    "                                        pin_memory=cuda, num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Max-depth scan (FashionMNIST): for every trained run under each maximum\n",
    "# depth, record the variational depth posterior q and its KL, the expected\n",
    "# and '95th' depth summaries, and test log-likelihood / error for the full\n",
    "# predictive, the expected-depth network and the '95th'-depth network.\n",
    "max_depths_FashionMNIST = []\n",
    "per_max_depth_d_exp_FashionMNIST = []\n",
    "per_max_depth_d_95th_FashionMNIST = []\n",
    "per_max_depth_q_FashionMNIST = []\n",
    "per_max_depth_KL_FashionMNIST = []\n",
    "\n",
    "# per_max_depth_train_loglike_FashionMNIST = []\n",
    "# per_max_depth_train_err_FashionMNIST = []\n",
    "per_max_depth_test_loglike_FashionMNIST = []\n",
    "per_max_depth_test_err_FashionMNIST = []\n",
    "\n",
    "# per_max_depth_train_loglike_d_exp_FashionMNIST = []\n",
    "# per_max_depth_train_err_d_exp_FashionMNIST = []\n",
    "per_max_depth_test_loglike_d_exp_FashionMNIST = []\n",
    "per_max_depth_test_err_d_exp_FashionMNIST = []\n",
    "\n",
    "# per_max_depth_train_loglike_d_95th_FashionMNIST = []\n",
    "# per_max_depth_train_err_d_95th_FashionMNIST = []\n",
    "per_max_depth_test_loglike_d_95th_FashionMNIST = []\n",
    "per_max_depth_test_err_d_95th_FashionMNIST = []\n",
    "\n",
    "#iterate over experiment settings\n",
    "exp_folders = os.listdir(experiment_dir)\n",
    "for exp_folder in exp_folders: # loop through all the files and folders\n",
    "    full_exp_folder = os.path.join(experiment_dir, exp_folder)\n",
    "    if os.path.isdir(full_exp_folder): # check whether the current object is a folder or not\n",
    "        \n",
    "        # Skip hidden folders such as .ipynb_checkpoints.\n",
    "        if exp_folder[0] == '.':\n",
    "            continue\n",
    "        \n",
    "        print(int(exp_folder))\n",
    "    \n",
    "        # Geometric prior over depths, normalised to sum to one.\n",
    "        prior_probs = 0.85 ** (1 + np.arange(int(exp_folder) + 1))\n",
    "        prior_probs = prior_probs / prior_probs.sum()\n",
    "        prob_model = variational_categorical(int(exp_folder), prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                     inner_width, int(exp_folder), prob_model)  \n",
    "        N_train = len(trainset)\n",
    "        lr = 1e-1\n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "        max_depths_FashionMNIST.append(int(exp_folder))\n",
    "        \n",
    "        per_max_depth_q_FashionMNIST.append([])\n",
    "        per_max_depth_KL_FashionMNIST.append([])\n",
    "        per_max_depth_d_exp_FashionMNIST.append([])\n",
    "        per_max_depth_d_95th_FashionMNIST.append([])\n",
    "        # per_max_depth_train_loglike_FashionMNIST.append([])\n",
    "        # per_max_depth_train_err_FashionMNIST.append([])\n",
    "        per_max_depth_test_loglike_FashionMNIST.append([])\n",
    "        per_max_depth_test_err_FashionMNIST.append([])\n",
    "        \n",
    "        # per_max_depth_train_loglike_d_exp_FashionMNIST.append([])\n",
    "        # per_max_depth_train_err_d_exp_FashionMNIST.append([])\n",
    "        per_max_depth_test_loglike_d_exp_FashionMNIST.append([])\n",
    "        per_max_depth_test_err_d_exp_FashionMNIST.append([])\n",
    "\n",
    "        # per_max_depth_train_loglike_d_95th_FashionMNIST.append([])\n",
    "        # per_max_depth_train_err_d_95th_FashionMNIST.append([])\n",
    "        per_max_depth_test_loglike_d_95th_FashionMNIST.append([])\n",
    "        per_max_depth_test_err_d_95th_FashionMNIST.append([])\n",
    "        \n",
    "        N_run_folders = os.listdir(full_exp_folder)\n",
    "        for N_run_folder in N_run_folders: # loop through all the files and folders\n",
    "            model_folder = os.path.join(full_exp_folder, N_run_folder, 'models')\n",
    "            if os.path.isdir(model_folder): # check whether the current object is a folder or not\n",
    "                \n",
    "                try:\n",
    "                    net.load(model_folder + '/theta_best.dat')\n",
    "                except:\n",
    "                    print('could not load: ' + model_folder)\n",
    "                    continue\n",
    "                \n",
    "                per_max_depth_q_FashionMNIST[-1].append(net.model.prob_model.get_q_probs().data.cpu().numpy())\n",
    "                per_max_depth_KL_FashionMNIST[-1].append(net.model.get_KL().item())\n",
    "                \n",
    "                loglike_tests, err_tests, loglike_tests_exp, err_tests_exp, loglike_tests_95th, err_tests_95th = [], [], [], [], [], []\n",
    "        \n",
    "                # Summarise q: expected depth under q, and the first depth\n",
    "                # whose probability exceeds 95% of the maximum.\n",
    "                probs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "                cuttoff = np.max(probs)*0.95\n",
    "                depth_exp = np.sum(probs * np.arange(net.model.n_layers + 1))\n",
    "                depth_95th = np.argmax(probs > cuttoff)\n",
    "                per_max_depth_d_exp_FashionMNIST[-1].append(depth_exp)\n",
    "                per_max_depth_d_95th_FashionMNIST[-1].append(depth_95th)\n",
    "\n",
    "                for x, y in valloader:\n",
    "                    if cuda:\n",
    "                        y = y.cuda()\n",
    "\n",
    "                    probs = net.sample_predict(x).sum(dim=0)\n",
    "                    loglike = -F.nll_loss(probs, y, reduction='sum').item()\n",
    "                    pred = probs.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err = pred.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests.append(loglike)\n",
    "                    err_tests.append(err)\n",
    "\n",
    "                    probs_exp = net.partial_predict(x, int(round(depth_exp))).sum(dim=0)\n",
    "                    loglike_exp = -F.nll_loss(probs_exp, y, reduction='sum').item()\n",
    "                    pred_exp = probs_exp.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err_exp = pred_exp.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests_exp.append(loglike_exp)\n",
    "                    err_tests_exp.append(err_exp)\n",
    "\n",
    "                    probs_95th = net.partial_predict(x, depth_95th).sum(dim=0)\n",
    "                    loglike_95th = -F.nll_loss(probs_95th, y, reduction='sum').item()\n",
    "                    pred_95th = probs_95th.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err_95th = pred_95th.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests_95th.append(loglike_95th)\n",
    "                    err_tests_95th.append(err_95th)\n",
    "\n",
    "                # Normalise batch sums to per-datapoint averages.\n",
    "                N_test = len(valset)\n",
    "                loglike_test = np.sum(loglike_tests) / N_test\n",
    "                err_test= np.sum(err_tests) / N_test\n",
    "                loglike_test_exp= np.sum(loglike_tests_exp) / N_test\n",
    "                err_test_exp = np.sum(err_tests_exp) / N_test\n",
    "                loglike_test_95th = np.sum(loglike_tests_95th) / N_test\n",
    "                err_test_95th = np.sum(err_tests_95th) / N_test\n",
    "                    \n",
    "                # per_max_depth_train_loglike_FashionMNIST[-1].append(loglike_train)\n",
    "                # per_max_depth_train_err_FashionMNIST[-1].append(err_train)\n",
    "                per_max_depth_test_loglike_FashionMNIST[-1].append(loglike_test)\n",
    "                per_max_depth_test_err_FashionMNIST[-1].append(err_test)\n",
    "                # per_max_depth_train_loglike_d_exp_FashionMNIST[-1].append(loglike_train_exp)\n",
    "                # per_max_depth_train_err_d_exp_FashionMNIST[-1].append(err_train_exp)\n",
    "                per_max_depth_test_loglike_d_exp_FashionMNIST[-1].append(loglike_test_exp)\n",
    "                per_max_depth_test_err_d_exp_FashionMNIST[-1].append(err_test_exp)\n",
    "                # per_max_depth_train_loglike_d_95th_FashionMNIST[-1].append(loglike_train_95th)\n",
    "                # per_max_depth_train_err_d_95th_FashionMNIST[-1].append(err_train_95th)\n",
    "                per_max_depth_test_loglike_d_95th_FashionMNIST[-1].append(loglike_test_95th)\n",
    "                per_max_depth_test_err_d_95th_FashionMNIST[-1].append(err_test_95th)    \n",
    "        \n",
    "        per_max_depth_q_FashionMNIST[-1] = np.stack(per_max_depth_q_FashionMNIST[-1], axis=0)\n",
    "        per_max_depth_KL_FashionMNIST[-1] = np.array(per_max_depth_KL_FashionMNIST[-1])\n",
    "        # per_max_depth_train_loglike_FashionMNIST[-1] = np.array(per_max_depth_train_loglike_FashionMNIST[-1])\n",
    "        # per_max_depth_train_err_FashionMNIST[-1] = np.array(per_max_depth_train_err_FashionMNIST[-1])\n",
    "        per_max_depth_test_loglike_FashionMNIST[-1] = np.array(per_max_depth_test_loglike_FashionMNIST[-1])\n",
    "        per_max_depth_test_err_FashionMNIST[-1] = np.array(per_max_depth_test_err_FashionMNIST[-1])\n",
    "        \n",
    "        # per_max_depth_train_loglike_d_FashionMNIST[-1] = np.array(per_max_depth_train_loglike_d_FashionMNIST[-1])\n",
    "        # per_max_depth_train_err_d_FashionMNIST[-1] = np.array(per_max_depth_train_err_d_FashionMNIST[-1])\n",
    "        per_max_depth_test_loglike_d_exp_FashionMNIST[-1] = np.array(per_max_depth_test_loglike_d_exp_FashionMNIST[-1])\n",
    "        per_max_depth_test_err_d_exp_FashionMNIST[-1] = np.array(per_max_depth_test_err_d_exp_FashionMNIST[-1])\n",
    "\n",
    "        # per_max_depth_train_loglike_d_FashionMNIST[-1] = np.array(per_max_depth_train_loglike_d_FashionMNIST[-1])\n",
    "        # per_max_depth_train_err_d_FashionMNIST[-1] = np.array(per_max_depth_train_err_d_FashionMNIST[-1])\n",
    "        per_max_depth_test_loglike_d_95th_FashionMNIST[-1] = np.array(per_max_depth_test_loglike_d_95th_FashionMNIST[-1])\n",
    "        per_max_depth_test_err_d_95th_FashionMNIST[-1] = np.array(per_max_depth_test_err_d_95th_FashionMNIST[-1])\n",
    "        \n",
    "                       \n",
    "# Sort all per-depth results by max depth so downstream plots are ordered in x.\n",
    "sort_idxs_FashionMNIST = np.argsort(max_depths_FashionMNIST)\n",
    "\n",
    "max_depths_FashionMNIST = np.array(max_depths_FashionMNIST)[sort_idxs_FashionMNIST]\n",
    "\n",
    "per_max_depth_q_FashionMNIST = [per_max_depth_q_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_KL_FashionMNIST = [per_max_depth_KL_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_d_exp_FashionMNIST = [per_max_depth_d_exp_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_d_95th_FashionMNIST = [per_max_depth_d_95th_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "\n",
    "# per_max_depth_train_loglike_FashionMNIST = [per_max_depth_train_loglike_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "# per_max_depth_train_err_FashionMNIST = [per_max_depth_train_err_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_test_loglike_FashionMNIST = [per_max_depth_test_loglike_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_test_err_FashionMNIST = [per_max_depth_test_err_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "\n",
    "\n",
    "# per_max_depth_train_loglike_d_exp_FashionMNIST = [per_max_depth_train_loglike_d_exp_FashionMNIST[i] for i in sort_idxs]\n",
    "# per_max_depth_train_err_d_exp_FashionMNIST = [per_max_depth_train_err_d_exp_FashionMNIST[i] for i in sort_idxs]\n",
    "per_max_depth_test_loglike_d_exp_FashionMNIST = [per_max_depth_test_loglike_d_exp_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_test_err_d_exp_FashionMNIST = [per_max_depth_test_err_d_exp_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "\n",
    "# per_max_depth_train_loglike_d_95th_FashionMNIST = [per_max_depth_train_loglike_d_95th_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "# per_max_depth_train_err_d_95th_FashionMNIST = [per_max_depth_train_err_d_95th_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_test_loglike_d_95th_FashionMNIST = [per_max_depth_test_loglike_d_95th_FashionMNIST[i] for i in sort_idxs_FashionMNIST]\n",
    "per_max_depth_test_err_d_95th_FashionMNIST = [per_max_depth_test_err_d_95th_FashionMNIST[i] for i in sort_idxs_FashionMNIST]    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data: scan deterministic depth - MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory holding the saved MNIST deterministic-depth scan runs.\n",
    "experiment_dir = save_dir + 'CNN_BNN_MNIST_deterministic_cat/' + 'deterministic_depth_scan/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# Architecture hyperparameters for MNIST.\n",
    "input_chan = 1   # greyscale images\n",
    "output_dim = 10  # number of classes\n",
    "outer_width = 64\n",
    "inner_width = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# MNIST loaders. Train and test use the same deterministic normalisation\n",
    "# (no augmentation), so one transform serves both.\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "transform_test = transform_train\n",
    "\n",
    "trainset = datasets.MNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.MNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only speeds up host-to-GPU transfers, so the former duplicated\n",
    "# if/else branches collapse to pin_memory=cuda.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,\n",
    "                                          pin_memory=cuda, num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False,\n",
    "                                        pin_memory=cuda, num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deterministic-depth scan (MNIST): for each depth folder, load every\n",
    "# trained run and evaluate test log-likelihood and error rate.\n",
    "d_depths_MNIST = []\n",
    "\n",
    "per_d_depth_test_loglike_MNIST = []\n",
    "per_d_depth_test_err_MNIST = []\n",
    "\n",
    "# Iterate over experiment settings (one sub-folder per depth).\n",
    "exp_folders = os.listdir(experiment_dir)\n",
    "for exp_folder in exp_folders:\n",
    "    full_exp_folder = os.path.join(experiment_dir, exp_folder)\n",
    "    if os.path.isdir(full_exp_folder):\n",
    "\n",
    "        # Skip hidden folders such as .ipynb_checkpoints.\n",
    "        if exp_folder[0] == '.':\n",
    "            continue\n",
    "\n",
    "        print(int(exp_folder))\n",
    "\n",
    "        # Deterministic depth: all probability mass on the deepest layer.\n",
    "        n_layers = int(exp_folder)\n",
    "        probs = np.zeros(n_layers + 1)\n",
    "        probs[-1] = 1\n",
    "\n",
    "        # BUG FIX: was cuda=True, which breaks on CPU-only machines; use the\n",
    "        # detected `cuda` flag like the rest of the notebook.\n",
    "        prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=cuda)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width,\n",
    "                                         inner_width, int(exp_folder), prob_model)\n",
    "        N_train = len(trainset)\n",
    "        lr = 1e-1\n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "        d_depths_MNIST.append(int(exp_folder))\n",
    "\n",
    "        per_d_depth_test_loglike_MNIST.append([])\n",
    "        per_d_depth_test_err_MNIST.append([])\n",
    "\n",
    "        N_run_folders = os.listdir(full_exp_folder)\n",
    "        for N_run_folder in N_run_folders:\n",
    "            model_folder = os.path.join(full_exp_folder, N_run_folder, 'models')\n",
    "            if os.path.isdir(model_folder):\n",
    "\n",
    "                try:\n",
    "                    net.load(model_folder + '/theta_best.dat')\n",
    "                except Exception:\n",
    "                    print('could not load: ' + model_folder)\n",
    "                    continue\n",
    "\n",
    "                loglike_tests, err_tests = [], []\n",
    "\n",
    "                for x, y in valloader:\n",
    "                    if cuda:\n",
    "                        y = y.cuda()\n",
    "\n",
    "                    # Average predictive distribution over posterior samples.\n",
    "                    probs = net.sample_predict(x).sum(dim=0)\n",
    "                    loglike = -F.nll_loss(probs, y, reduction='sum').item()\n",
    "                    pred = probs.max(dim=1, keepdim=False)[1]  # index of the max probability\n",
    "                    err = pred.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests.append(loglike)\n",
    "                    err_tests.append(err)\n",
    "\n",
    "                N_test = len(valset)\n",
    "                loglike_test = np.sum(loglike_tests) / N_test\n",
    "                err_test = np.sum(err_tests) / N_test\n",
    "\n",
    "                # BUG FIX: these appends were mis-indented outside the\n",
    "                # isdir check, so stale (or undefined) results were appended\n",
    "                # for folders without a model; append only after a\n",
    "                # successful evaluation.\n",
    "                per_d_depth_test_loglike_MNIST[-1].append(loglike_test)\n",
    "                per_d_depth_test_err_MNIST[-1].append(err_test)\n",
    "\n",
    "        per_d_depth_test_loglike_MNIST[-1] = np.array(per_d_depth_test_loglike_MNIST[-1])\n",
    "        per_d_depth_test_err_MNIST[-1] = np.array(per_d_depth_test_err_MNIST[-1])\n",
    "\n",
    "# Sort all results by depth so plots are ordered in x.\n",
    "sort_idxs_MNIST2 = np.argsort(d_depths_MNIST)\n",
    "\n",
    "d_depths_MNIST = np.array(d_depths_MNIST)[sort_idxs_MNIST2]\n",
    "\n",
    "per_d_depth_test_loglike_MNIST = [per_d_depth_test_loglike_MNIST[i] for i in sort_idxs_MNIST2]\n",
    "per_d_depth_test_err_MNIST = [per_d_depth_test_err_MNIST[i] for i in sort_idxs_MNIST2]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data: scan deterministic depth - SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory holding the saved SVHN deterministic-depth scan runs.\n",
    "experiment_dir = save_dir + 'CNN_BNN_SVHN_deterministic_cat/' + 'deterministic_depth_scan/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# Architecture hyperparameters for SVHN.\n",
    "input_chan = 3   # RGB images\n",
    "output_dim = 10  # number of classes\n",
    "outer_width = 64\n",
    "inner_width = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SVHN loaders. Train and test use the same deterministic normalisation\n",
    "# (no augmentation), so one transform serves both.\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "transform_test = transform_train\n",
    "\n",
    "trainset = datasets.SVHN(root='../data', split=\"train\", download=True, transform=transform_train)\n",
    "valset = datasets.SVHN(root='../data', split=\"test\", download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only speeds up host-to-GPU transfers, so the former duplicated\n",
    "# if/else branches collapse to pin_memory=cuda.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,\n",
    "                                          pin_memory=cuda, num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False,\n",
    "                                        pin_memory=cuda, num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deterministic-depth scan (SVHN): for each depth folder, load every\n",
    "# trained run and evaluate test log-likelihood and error rate.\n",
    "d_depths_SVHN = []\n",
    "\n",
    "per_d_depth_test_loglike_SVHN = []\n",
    "per_d_depth_test_err_SVHN = []\n",
    "\n",
    "# Iterate over experiment settings (one sub-folder per depth).\n",
    "exp_folders = os.listdir(experiment_dir)\n",
    "for exp_folder in exp_folders:\n",
    "    full_exp_folder = os.path.join(experiment_dir, exp_folder)\n",
    "    if os.path.isdir(full_exp_folder):\n",
    "\n",
    "        # Skip hidden folders such as .ipynb_checkpoints.\n",
    "        if exp_folder[0] == '.':\n",
    "            continue\n",
    "\n",
    "        print(int(exp_folder))\n",
    "\n",
    "        # Deterministic depth: all probability mass on the deepest layer.\n",
    "        n_layers = int(exp_folder)\n",
    "        probs = np.zeros(n_layers + 1)\n",
    "        probs[-1] = 1\n",
    "\n",
    "        # BUG FIX: was cuda=True, which breaks on CPU-only machines; use the\n",
    "        # detected `cuda` flag like the rest of the notebook.\n",
    "        prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=cuda)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width,\n",
    "                                         inner_width, int(exp_folder), prob_model)\n",
    "        N_train = len(trainset)\n",
    "        lr = 1e-1\n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "        d_depths_SVHN.append(int(exp_folder))\n",
    "\n",
    "        per_d_depth_test_loglike_SVHN.append([])\n",
    "        per_d_depth_test_err_SVHN.append([])\n",
    "\n",
    "        N_run_folders = os.listdir(full_exp_folder)\n",
    "        for N_run_folder in N_run_folders:\n",
    "            model_folder = os.path.join(full_exp_folder, N_run_folder, 'models')\n",
    "            if os.path.isdir(model_folder):\n",
    "\n",
    "                try:\n",
    "                    net.load(model_folder + '/theta_best.dat')\n",
    "                except Exception:\n",
    "                    print('could not load: ' + model_folder)\n",
    "                    continue\n",
    "\n",
    "                loglike_tests, err_tests = [], []\n",
    "\n",
    "                for x, y in valloader:\n",
    "                    if cuda:\n",
    "                        y = y.cuda()\n",
    "\n",
    "                    # Average predictive distribution over posterior samples.\n",
    "                    probs = net.sample_predict(x).sum(dim=0)\n",
    "                    loglike = -F.nll_loss(probs, y, reduction='sum').item()\n",
    "                    pred = probs.max(dim=1, keepdim=False)[1]  # index of the max probability\n",
    "                    err = pred.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests.append(loglike)\n",
    "                    err_tests.append(err)\n",
    "\n",
    "                N_test = len(valset)\n",
    "                loglike_test = np.sum(loglike_tests) / N_test\n",
    "                err_test = np.sum(err_tests) / N_test\n",
    "\n",
    "                # BUG FIX: these appends were mis-indented outside the\n",
    "                # isdir check, so stale (or undefined) results were appended\n",
    "                # for folders without a model; append only after a\n",
    "                # successful evaluation.\n",
    "                per_d_depth_test_loglike_SVHN[-1].append(loglike_test)\n",
    "                per_d_depth_test_err_SVHN[-1].append(err_test)\n",
    "\n",
    "        per_d_depth_test_loglike_SVHN[-1] = np.array(per_d_depth_test_loglike_SVHN[-1])\n",
    "        per_d_depth_test_err_SVHN[-1] = np.array(per_d_depth_test_err_SVHN[-1])\n",
    "\n",
    "# Sort all results by depth so plots are ordered in x.\n",
    "sort_idxs_SVHN2 = np.argsort(d_depths_SVHN)\n",
    "\n",
    "d_depths_SVHN = np.array(d_depths_SVHN)[sort_idxs_SVHN2]\n",
    "\n",
    "per_d_depth_test_loglike_SVHN = [per_d_depth_test_loglike_SVHN[i] for i in sort_idxs_SVHN2]\n",
    "per_d_depth_test_err_SVHN = [per_d_depth_test_err_SVHN[i] for i in sort_idxs_SVHN2]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data: scan deterministic depth - FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directory holding the saved FashionMNIST deterministic-depth scan runs.\n",
    "experiment_dir = save_dir + 'CNN_BNN_FashionMNIST_deterministic_cat/' + 'deterministic_depth_scan/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# Architecture hyperparameters for FashionMNIST.\n",
    "input_chan = 1   # greyscale images\n",
    "output_dim = 10  # number of classes\n",
    "outer_width = 64\n",
    "inner_width = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# FashionMNIST loaders. Train and test use the same deterministic\n",
    "# normalisation (no augmentation), so one transform serves both.\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.2860,), std=(0.3530,))\n",
    "])\n",
    "transform_test = transform_train\n",
    "\n",
    "trainset = datasets.FashionMNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.FashionMNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only speeds up host-to-GPU transfers, so the former duplicated\n",
    "# if/else branches collapse to pin_memory=cuda.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,\n",
    "                                          pin_memory=cuda, num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False,\n",
    "                                        pin_memory=cuda, num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "d_depths_FashionMNIST = []\n",
    "\n",
    "# per_d_depth_train_loglike_FashionMNIST = []\n",
    "# per_d_depth_train_err_FashionMNIST = []\n",
    "per_d_depth_test_loglike_FashionMNIST = []\n",
    "per_d_depth_test_err_FashionMNIST = []\n",
    "\n",
    "#iterate over experiment settings\n",
    "exp_folders = os.listdir(experiment_dir)\n",
    "for exp_folder in exp_folders: # loop through all the files and folders\n",
    "    full_exp_folder = os.path.join(experiment_dir, exp_folder)\n",
    "    if os.path.isdir(full_exp_folder): # check whether the current object is a folder or not\n",
    "        \n",
    "        if exp_folder[0] == '.':\n",
    "            continue\n",
    "        \n",
    "        print(int(exp_folder))\n",
    "        \n",
    "        n_layers = int(exp_folder)\n",
    "        probs = np.zeros(n_layers + 1)\n",
    "        probs[-1] = 1\n",
    "        \n",
    "        prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=True)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                     inner_width, int(exp_folder), prob_model)  \n",
    "        N_train = len(trainset)\n",
    "        lr = 1e-1\n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "        \n",
    "        \n",
    "        \n",
    "        d_depths_FashionMNIST.append(int(exp_folder))\n",
    "        \n",
    "        # per_d_depth_train_loglike_FashionMNIST.append([])\n",
    "        # per_d_depth_train_err_FashionMNIST.append([])\n",
    "        per_d_depth_test_loglike_FashionMNIST.append([])\n",
    "        per_d_depth_test_err_FashionMNIST.append([])\n",
    "        \n",
    "        \n",
    "        N_run_folders = os.listdir(full_exp_folder)\n",
    "        for N_run_folder in N_run_folders: # loop through all the files and folders\n",
    "            model_folder = os.path.join(full_exp_folder, N_run_folder, 'models')\n",
    "            if os.path.isdir(model_folder): # check whether the current object is a folder or not\n",
    "                \n",
    "                try:\n",
    "                    net.load(model_folder + '/theta_best.dat')\n",
    "                except:\n",
    "                    print('could not load: ' + model_folder)\n",
    "                    continue\n",
    "\n",
    "                loglike_tests, err_tests = [], []\n",
    "\n",
    "                for x, y in valloader:\n",
    "                    if cuda:\n",
    "                        y = y.cuda()\n",
    "\n",
    "                    probs = net.sample_predict(x).sum(dim=0)\n",
    "                    loglike = -F.nll_loss(probs, y, reduction='sum').item()\n",
    "                    pred = probs.max(dim=1, keepdim=False)[1]  # get the index of the max probability\n",
    "                    err = pred.ne(y).sum().item()\n",
    "\n",
    "                    loglike_tests.append(loglike)\n",
    "                    err_tests.append(err)\n",
    "\n",
    "                N_test = len(valset)\n",
    "                loglike_test = np.sum(loglike_tests) / N_test\n",
    "                err_test= np.sum(err_tests) / N_test\n",
    "                \n",
    "\n",
    "                per_d_depth_test_loglike_FashionMNIST[-1].append(loglike_test)\n",
    "                per_d_depth_test_err_FashionMNIST[-1].append(err_test)\n",
    "\n",
    "        per_d_depth_test_loglike_FashionMNIST[-1] = np.array(per_d_depth_test_loglike_FashionMNIST[-1])\n",
    "        per_d_depth_test_err_FashionMNIST[-1] = np.array(per_d_depth_test_err_FashionMNIST[-1])\n",
    "        \n",
    "                \n",
    "sort_idxs_FashionMNIST2 = np.argsort(d_depths_FashionMNIST)\n",
    "\n",
    "d_depths_FashionMNIST = np.array(d_depths_FashionMNIST)[sort_idxs_FashionMNIST2]\n",
    "\n",
    "\n",
    "per_d_depth_test_loglike_FashionMNIST = [per_d_depth_test_loglike_FashionMNIST[i] for i in sort_idxs_FashionMNIST2]\n",
    "per_d_depth_test_err_FashionMNIST = [per_d_depth_test_err_FashionMNIST[i] for i in sort_idxs_FashionMNIST2]\n",
    " "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save plot data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# to_save = (\n",
    "#     per_d_depth_test_loglike_MNIST,\n",
    "#     per_d_depth_test_err_MNIST,\n",
    "#     per_max_depth_q_MNIST,\n",
    "#     per_max_depth_test_loglike_MNIST,\n",
    "#     per_max_depth_test_err_MNIST,\n",
    "#     per_max_depth_test_loglike_d_exp_MNIST,\n",
    "#     per_max_depth_test_loglike_d_95th_MNIST,\n",
    "#     per_max_depth_test_err_d_exp_MNIST,\n",
    "#     per_max_depth_test_err_d_95th_MNIST,\n",
    "#     per_max_depth_d_exp_MNIST,\n",
    "#     per_max_depth_d_95th_MNIST,\n",
    "#     per_d_depth_test_loglike_FashionMNIST,\n",
    "#     per_d_depth_test_err_FashionMNIST,\n",
    "#     per_max_depth_q_FashionMNIST,\n",
    "#     per_max_depth_test_loglike_FashionMNIST,\n",
    "#     per_max_depth_test_err_FashionMNIST,\n",
    "#     per_max_depth_test_loglike_d_exp_FashionMNIST,\n",
    "#     per_max_depth_test_loglike_d_95th_FashionMNIST,\n",
    "#     per_max_depth_test_err_d_exp_FashionMNIST,\n",
    "#     per_max_depth_test_err_d_95th_FashionMNIST,\n",
    "#     per_max_depth_d_exp_FashionMNIST,\n",
    "#     per_max_depth_d_95th_FashionMNIST,\n",
    "#     per_d_depth_test_loglike_SVHN,\n",
    "#     per_d_depth_test_err_SVHN,\n",
    "#     per_max_depth_q_SVHN,\n",
    "#     per_max_depth_test_loglike_SVHN,\n",
    "#     per_max_depth_test_err_SVHN,\n",
    "#     per_max_depth_test_loglike_d_exp_SVHN,\n",
    "#     per_max_depth_test_loglike_d_95th_SVHN,\n",
    "#     per_max_depth_test_err_d_exp_SVHN,\n",
    "#     per_max_depth_test_err_d_95th_SVHN,\n",
    "#     per_max_depth_d_exp_SVHN,\n",
    "#     per_max_depth_d_95th_SVHN,\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/image_depth_dist_exps.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load plot data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('../saves/paper_plots/image_depth_dist_exps_final.pkl','rb') as f:\n",
    "     (\n",
    "        per_d_depth_test_loglike_MNIST,\n",
    "        per_d_depth_test_err_MNIST,\n",
    "        per_max_depth_q_MNIST,\n",
    "        per_max_depth_test_loglike_MNIST,\n",
    "        per_max_depth_test_err_MNIST,\n",
    "        per_max_depth_test_loglike_d_exp_MNIST,\n",
    "        per_max_depth_test_loglike_d_95th_MNIST,\n",
    "        per_max_depth_test_err_d_exp_MNIST,\n",
    "        per_max_depth_test_err_d_95th_MNIST,\n",
    "        per_max_depth_d_exp_MNIST,\n",
    "        per_max_depth_d_95th_MNIST,\n",
    "        per_d_depth_test_loglike_FashionMNIST,\n",
    "        per_d_depth_test_err_FashionMNIST,\n",
    "        per_max_depth_q_FashionMNIST,\n",
    "        per_max_depth_test_loglike_FashionMNIST,\n",
    "        per_max_depth_test_err_FashionMNIST,\n",
    "        per_max_depth_test_loglike_d_exp_FashionMNIST,\n",
    "        per_max_depth_test_loglike_d_95th_FashionMNIST,\n",
    "        per_max_depth_test_err_d_exp_FashionMNIST,\n",
    "        per_max_depth_test_err_d_95th_FashionMNIST,\n",
    "        per_max_depth_d_exp_FashionMNIST,\n",
    "        per_max_depth_d_95th_FashionMNIST,\n",
    "        per_d_depth_test_loglike_SVHN,\n",
    "        per_d_depth_test_err_SVHN,\n",
    "        per_max_depth_q_SVHN,\n",
    "        per_max_depth_test_loglike_SVHN,\n",
    "        per_max_depth_test_err_SVHN,\n",
    "        per_max_depth_test_loglike_d_exp_SVHN,\n",
    "        per_max_depth_test_loglike_d_95th_SVHN,\n",
    "        per_max_depth_test_err_d_exp_SVHN,\n",
    "        per_max_depth_test_err_d_95th_SVHN,\n",
    "        per_max_depth_d_exp_SVHN,\n",
    "        per_max_depth_d_95th_SVHN,\n",
    "    ) = pickle.load(f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plots"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "n_layers = 50\n",
    "index = 10\n",
    "\n",
    "## MNIST\n",
    "# deterministic\n",
    "perdd_te_ll_means_MNIST, perdd_te_ll_stds_MNIST = get_run_mean_std(per_d_depth_test_loglike_MNIST)\n",
    "perdd_te_err_means_MNIST, perdd_te_err_stds_MNIST = get_run_mean_std(np.array(per_d_depth_test_err_MNIST)*100)\n",
    "\n",
    "\n",
    "# Proposed full depth\n",
    "depth_means_MNIST, depth_stds_MNIST = get_run_mean_std(per_max_depth_q_MNIST, last_argmax=True)\n",
    "te_ll_means_MNIST, te_ll_stds_MNIST = get_run_mean_std(per_max_depth_test_loglike_MNIST)\n",
    "te_err_means_MNIST, te_err_stds_MNIST = get_run_mean_std(np.array(per_max_depth_test_err_MNIST)*100)\n",
    "\n",
    "\n",
    "# Prune depth\n",
    "te_ll_means_d_exp_MNIST, te_ll_stds_d_exp_MNIST = get_run_mean_std(per_max_depth_test_loglike_d_exp_MNIST)\n",
    "te_ll_means_d_MNIST, te_ll_stds_d_MNIST = get_run_mean_std(per_max_depth_test_loglike_d_95th_MNIST)\n",
    "te_err_means_d_exp_MNIST, te_err_stds_d_exp_MNIST = get_run_mean_std(np.array(per_max_depth_test_err_d_exp_MNIST)*100)\n",
    "te_err_means_d_MNIST, te_err_stds_d_MNIST = get_run_mean_std(np.array(per_max_depth_test_err_d_95th_MNIST)*100)\n",
    "\n",
    "depth_means_exp_MNIST, depth_stds_exp_MNIST = get_run_mean_std(per_max_depth_d_exp_MNIST)\n",
    "depth_means_MNIST, depth_stds_MNIST = get_run_mean_std(per_max_depth_d_95th_MNIST)\n",
    "\n",
    "\n",
    "# get results at depth n\n",
    "depth_n_probs_MNIST = per_max_depth_q_MNIST[index].mean(axis=0)\n",
    "depth_n_probs_std_MNIST = per_max_depth_q_MNIST[index].std(axis=0)\n",
    "\n",
    "learnt_depth_ll_mean_MNIST = te_ll_means_d_MNIST[index]\n",
    "learnt_depth_ll_std_MNIST = te_ll_stds_d_MNIST[index]\n",
    "learnt_depth_err_mean_MNIST = te_err_means_d_MNIST[index]\n",
    "learnt_depth_err_std_MNIST = te_err_stds_d_MNIST[index]\n",
    "learnt_depth_mean_MNIST = depth_means_MNIST[index]\n",
    "learnt_depth_std_MNIST = depth_stds_MNIST[index]\n",
    "\n",
    "## FashionMNIST\n",
    "# deterministic\n",
    "perdd_te_ll_means_FashionMNIST, perdd_te_ll_stds_FashionMNIST = get_run_mean_std(per_d_depth_test_loglike_FashionMNIST)\n",
    "perdd_te_err_means_FashionMNIST, perdd_te_err_stds_FashionMNIST = get_run_mean_std(np.array(per_d_depth_test_err_FashionMNIST)*100)\n",
    "\n",
    "# Proposed full depth\n",
    "depth_means_FashionMNIST, depth_stds_FashionMNIST = get_run_mean_std(per_max_depth_q_FashionMNIST, last_argmax=True)\n",
    "te_ll_means_FashionMNIST, te_ll_stds_FashionMNIST = get_run_mean_std(per_max_depth_test_loglike_FashionMNIST)\n",
    "te_err_means_FashionMNIST, te_err_stds_FashionMNIST = get_run_mean_std(np.array(per_max_depth_test_err_FashionMNIST)*100)\n",
    "\n",
    "# Prune depth\n",
    "te_ll_means_d_exp_FashionMNIST, te_ll_stds_d_exp_FashionMNIST = get_run_mean_std(per_max_depth_test_loglike_d_exp_FashionMNIST)\n",
    "te_ll_means_d_FashionMNIST, te_ll_stds_d_FashionMNIST = get_run_mean_std(per_max_depth_test_loglike_d_95th_FashionMNIST)\n",
    "te_err_means_d_exp_FashionMNIST, te_err_stds_d_exp_FashionMNIST = get_run_mean_std(np.array(per_max_depth_test_err_d_exp_FashionMNIST)*100)\n",
    "te_err_means_d_FashionMNIST, te_err_stds_d_FashionMNIST = get_run_mean_std(np.array(per_max_depth_test_err_d_95th_FashionMNIST)*100)\n",
    "\n",
    "depth_means_exp_FashionMNIST, depth_stds_exp_FashionMNIST = get_run_mean_std(per_max_depth_d_exp_FashionMNIST)\n",
    "depth_means_FashionMNIST, depth_stds_FashionMNIST = get_run_mean_std(per_max_depth_d_95th_FashionMNIST)\n",
    "\n",
    "# get results at depth n\n",
    "depth_n_probs_FashionMNIST = per_max_depth_q_FashionMNIST[index].mean(axis=0)\n",
    "depth_n_probs_std_FashionMNIST = per_max_depth_q_FashionMNIST[index].std(axis=0)\n",
    "\n",
    "learnt_depth_ll_mean_FashionMNIST = te_ll_means_d_FashionMNIST[index]\n",
    "learnt_depth_ll_std_FashionMNIST = te_ll_stds_d_FashionMNIST[index]\n",
    "learnt_depth_err_mean_FashionMNIST = te_err_means_d_FashionMNIST[index]\n",
    "learnt_depth_err_std_FashionMNIST = te_err_stds_d_FashionMNIST[index]\n",
    "learnt_depth_mean_FashionMNIST = depth_means_FashionMNIST[index]\n",
    "learnt_depth_std_FashionMNIST = depth_stds_FashionMNIST[index]\n",
    "\n",
    "## SVHN\n",
    "# deterministic\n",
    "perdd_te_ll_means_SVHN, perdd_te_ll_stds_SVHN = get_run_mean_std(per_d_depth_test_loglike_SVHN)\n",
    "perdd_te_err_means_SVHN, perdd_te_err_stds_SVHN = get_run_mean_std(np.array(per_d_depth_test_err_SVHN)*100)\n",
    "\n",
    "# Proposed full depth\n",
    "depth_means_SVHN, depth_stds_SVHN = get_run_mean_std(per_max_depth_q_SVHN, last_argmax=True)\n",
    "te_ll_means_SVHN, te_ll_stds_SVHN = get_run_mean_std(per_max_depth_test_loglike_SVHN)\n",
    "te_err_means_SVHN, te_err_stds_SVHN = get_run_mean_std(np.array(per_max_depth_test_err_SVHN)*100)\n",
    "\n",
    "# Prune depth\n",
    "te_ll_means_d_exp_SVHN, te_ll_stds_d_exp_SVHN = get_run_mean_std(per_max_depth_test_loglike_d_exp_SVHN)\n",
    "te_ll_means_d_SVHN, te_ll_stds_d_SVHN = get_run_mean_std(per_max_depth_test_loglike_d_95th_SVHN)\n",
    "te_err_means_d_exp_SVHN, te_err_stds_d_exp_SVHN = get_run_mean_std(np.array(per_max_depth_test_err_d_exp_SVHN)*100)\n",
    "te_err_means_d_SVHN, te_err_stds_d_SVHN = get_run_mean_std(np.array(per_max_depth_test_err_d_95th_SVHN)*100)\n",
    "\n",
    "depth_means_exp_SVHN, depth_stds_exp_SVHN = get_run_mean_std(per_max_depth_d_exp_SVHN)\n",
    "depth_means_SVHN, depth_stds_SVHN = get_run_mean_std(per_max_depth_d_95th_SVHN)\n",
    "\n",
    "# get results at depth n\n",
    "depth_n_probs_SVHN = per_max_depth_q_SVHN[index].mean(axis=0)\n",
    "depth_n_probs_std_SVHN = per_max_depth_q_SVHN[index].std(axis=0)\n",
    "\n",
    "learnt_depth_ll_mean_SVHN = te_ll_means_d_SVHN[index]\n",
    "learnt_depth_ll_std_SVHN = te_ll_stds_d_SVHN[index]\n",
    "learnt_depth_err_mean_SVHN = te_err_means_d_SVHN[index]\n",
    "learnt_depth_err_std_SVHN = te_err_stds_d_SVHN[index]\n",
    "learnt_depth_mean_SVHN = depth_means_SVHN[index]\n",
    "learnt_depth_std_SVHN = depth_stds_SVHN[index]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Main text detailed plot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "markersize = 0.05\n",
    "lw = 1\n",
    "s=8\n",
    "dpi=800 \n",
    "\n",
    "fig, ax = plt.subplots(nrows=1, ncols=3, dpi=dpi, figsize=(1.1*text_width, 2.18), sharey=True)\n",
    "\n",
    "ax02 = ax[0].twinx()\n",
    "ax12 = ax[1].twinx()\n",
    "ax22 = ax[2].twinx()\n",
    "\n",
    "ax02.get_shared_y_axes().join(ax02, ax12, ax22)\n",
    "ax02.get_yaxis().set_ticklabels([])\n",
    "ax12.get_yaxis().set_ticklabels([])\n",
    "ax02.set_ylim([0.4, 1])\n",
    "\n",
    "ax[1].set_xlabel('$d$  (No. active blocks)')\n",
    "ax[0].set_ylabel(r'$q_{\\boldsymbol{\\alpha}}(d)$')\n",
    "\n",
    "ax22.set_ylabel('test log-like')\n",
    "\n",
    "ax[0].set_title(\"MNIST\")\n",
    "ax[1].set_title(\"Fashion-MNIST\")\n",
    "ax[2].set_title(\"SVHN\")\n",
    "\n",
    "ax[0].set_ylim([0, 0.055])\n",
    "\n",
    "ax[0].set_xlim([0, 50])\n",
    "ax[1].set_xlim([0, 50])\n",
    "ax[2].set_xlim([0, 50])\n",
    "\n",
    "depths = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]\n",
    "\n",
    "######### PLOT 1 - MNIST ########################\n",
    "\n",
    "ax[0].axvspan(learnt_depth_mean_MNIST - learnt_depth_std_MNIST, learnt_depth_mean_MNIST + learnt_depth_std_MNIST, alpha=0.5, facecolor='black')\n",
    "learnt_depth_line =  ax[0].axvline(learnt_depth_mean_MNIST, color='black', lw=0.7)\n",
    "\n",
    "bars = ax[0].bar(range(n_layers + 1), depth_n_probs_MNIST, width=(1/n_layers + 1), yerr=depth_n_probs_std_MNIST,\n",
    "        alpha=0.8, label='$\\alpha_{i}$', color=c[0], edgecolor='k', error_kw=dict(lw=0.7))\n",
    "\n",
    "our_line, = errorfill(range(n_layers + 1), np.array([learnt_depth_ll_mean_MNIST]*(n_layers + 1)),\n",
    "          np.array([learnt_depth_ll_std_MNIST]*(n_layers + 1)),\n",
    "          color=c[0], alpha_fill=0.3, ax=ax02, lw=lw, linestyle='-.')\n",
    "\n",
    "det_depth_line, = errorfill(depths[:index+1], perdd_te_ll_means_MNIST[:index+1], perdd_te_ll_stds_MNIST[:index+1],\n",
    "          color=c[3], alpha_fill=0.3, ax=ax02, lw=lw, linestyle='--')\n",
    "\n",
    "ax02.scatter(depths[:index+1], perdd_te_ll_means_MNIST[:index+1], c=c[3], s=s)\n",
    "\n",
    "ax[0].yaxis.grid(alpha=0.3)\n",
    "ax[0].xaxis.grid(alpha=0.3) \n",
    "\n",
    "p_lgd = [ our_line, det_depth_line, bars, learnt_depth_line]\n",
    "lgd = ax[0].legend(p_lgd,\n",
    "['LDN', 'DDN', '$\\\\alpha_{i}$', '$d_{opt}$'], #\n",
    "                prop={'size': fs-2, 'weight': 'normal'}, frameon=True, markerscale=markersize, loc=\"lower right\")\n",
    "\n",
    "######### PLOT 2 - FashionMNIST ########################\n",
    "\n",
    "ax[1].axvspan(learnt_depth_mean_FashionMNIST - learnt_depth_std_FashionMNIST, learnt_depth_mean_FashionMNIST + learnt_depth_std_FashionMNIST, alpha=0.5, facecolor='black')\n",
    "ax[1].axvline(learnt_depth_mean_FashionMNIST, color='black', lw=0.7)\n",
    "\n",
    "bars = ax[1].bar(range(n_layers + 1), depth_n_probs_FashionMNIST, width=(1/n_layers + 1), yerr=depth_n_probs_std_FashionMNIST,\n",
    "        alpha=0.8, label='$\\alpha_{i}$', color=c[0], edgecolor='k', error_kw=dict(lw=0.7))\n",
    "\n",
    "our_line, = errorfill(range(n_layers + 1), np.array([learnt_depth_ll_mean_FashionMNIST]*(n_layers + 1)),\n",
    "          np.array([learnt_depth_ll_std_FashionMNIST]*(n_layers + 1)),\n",
    "          color=c[0], alpha_fill=0.3, ax=ax12, lw=lw, linestyle='-.')\n",
    "\n",
    "det_depth_line, = errorfill(depths[:index+1], perdd_te_ll_means_FashionMNIST[:index+1], perdd_te_ll_stds_FashionMNIST[:index+1],\n",
    "          color=c[3], alpha_fill=0.3, ax=ax12, lw=lw, linestyle='--')\n",
    "\n",
    "ax12.scatter(depths[:index+1], perdd_te_ll_means_FashionMNIST[:index+1], c=c[3], s=s)\n",
    "\n",
    "ax[1].yaxis.grid(alpha=0.3)\n",
    "ax[1].xaxis.grid(alpha=0.3) \n",
    "\n",
    "######### PLOT 3 - SVHN ########################\n",
    "\n",
    "ax[2].axvspan(learnt_depth_mean_SVHN - learnt_depth_std_SVHN, learnt_depth_mean_SVHN + learnt_depth_std_SVHN, alpha=0.5, facecolor='black')\n",
    "ax[2].axvline(learnt_depth_mean_SVHN, color='black', lw=0.7)\n",
    "\n",
    "bars = ax[2].bar(range(n_layers + 1), depth_n_probs_SVHN, width=(1/n_layers + 1), yerr=depth_n_probs_std_SVHN,\n",
    "        alpha=0.8, label='$\\alpha_{i}$', color=c[0], edgecolor='k', error_kw=dict(lw=0.7))\n",
    "\n",
    "our_line, = errorfill(range(n_layers + 1), np.array([learnt_depth_ll_mean_SVHN]*(n_layers + 1)),\n",
    "          np.array([learnt_depth_ll_std_SVHN]*(n_layers + 1)),\n",
    "          color=c[0], alpha_fill=0.3, ax=ax22, lw=lw, linestyle='-.')\n",
    "\n",
    "det_depth_line, = errorfill(depths[:index+1], perdd_te_ll_means_SVHN[:index+1], perdd_te_ll_stds_SVHN[:index+1],\n",
    "          color=c[3], alpha_fill=0.3, ax=ax22, lw=lw, linestyle='--')\n",
    "\n",
    "ax22.scatter(depths[:index+1], perdd_te_ll_means_SVHN[:index+1], c=c[3], s=s)\n",
    "\n",
    "ax[2].yaxis.grid(alpha=0.3)\n",
    "ax[2].xaxis.grid(alpha=0.3) \n",
    "\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig(plot_savedir + 'image_depth_dist_exps.pdf', format='pdf', bbox_inches='tight')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Appendix detailed results plot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "markersize = 0.05\n",
    "lw = 1\n",
    "s=8\n",
    "dpi=800\n",
    "\n",
    "fig, ax = plt.subplots(nrows=5, ncols=3, dpi=dpi, figsize=(1.1*text_width*0.9, 10.*0.9), sharey=False)\n",
    "\n",
    "ax[4, 1].set_xlabel('D  (Max depth)')\n",
    "\n",
    "ax[0, 0].set_ylabel('d  (No. active blocks)')\n",
    "ax[1, 0].set_ylabel('test log-like')\n",
    "ax[2, 0].set_ylabel('test log-like')\n",
    "ax[3, 0].set_ylabel('test error')\n",
    "ax[4, 0].set_ylabel('test error')\n",
    "\n",
    "ax[0, 0].set_title(\"MNIST\")\n",
    "ax[0, 1].set_title(\"Fashion-MNIST\")\n",
    "ax[0, 2].set_title(\"SVHN\")\n",
    "\n",
    "ax[0, 0].get_shared_y_axes().join(ax[0, 0], ax[0, 1], ax[0, 2])\n",
    "\n",
    "ax[1, 0].get_shared_y_axes().join(ax[1, 0], ax[1, 1], ax[1, 2])\n",
    "\n",
    "ax[2, 0].get_shared_y_axes().join(ax[2, 0], ax[2, 1], ax[2, 2])\n",
    "\n",
    "ax[0, 0].set_ylim([0, 100])\n",
    "ax[1, 0].set_ylim([0.7, 1])\n",
    "ax[2, 0].set_ylim([0.7, 1])\n",
    "ax[3, 0].set_ylim([0, 5])\n",
    "ax[3, 1].set_ylim([7.5, 20])\n",
    "ax[3, 2].set_ylim([4, 12])\n",
    "ax[4, 0].set_ylim([0, 5])\n",
    "ax[4, 1].set_ylim([7.5, 20])\n",
    "ax[4, 2].set_ylim([4, 12])\n",
    "\n",
    "ax[0, 0].set_xlim([0, 100])\n",
    "ax[0, 1].set_xlim([0, 100])\n",
    "ax[0, 2].set_xlim([0, 100])\n",
    "ax[1, 0].set_xlim([0, 100])\n",
    "ax[1, 1].set_xlim([0, 100])\n",
    "ax[1, 2].set_xlim([0, 100])\n",
    "ax[2, 0].set_xlim([0, 100])\n",
    "ax[2, 1].set_xlim([0, 100])\n",
    "ax[2, 2].set_xlim([0, 100])\n",
    "ax[3, 0].set_xlim([0, 100])\n",
    "ax[3, 1].set_xlim([0, 100])\n",
    "ax[3, 2].set_xlim([0, 100])\n",
    "ax[4, 0].set_xlim([0, 100])\n",
    "ax[4, 1].set_xlim([0, 100])\n",
    "ax[4, 2].set_xlim([0, 100])\n",
    "\n",
    "ax[0, 0].yaxis.grid(alpha=0.3)\n",
    "ax[0, 0].xaxis.grid(alpha=0.3) \n",
    "ax[0, 1].yaxis.grid(alpha=0.3)\n",
    "ax[0, 1].xaxis.grid(alpha=0.3) \n",
    "ax[0, 2].yaxis.grid(alpha=0.3)\n",
    "ax[0, 2].xaxis.grid(alpha=0.3) \n",
    "ax[1, 0].yaxis.grid(alpha=0.3)\n",
    "ax[1, 0].xaxis.grid(alpha=0.3) \n",
    "ax[1, 1].yaxis.grid(alpha=0.3)\n",
    "ax[1, 1].xaxis.grid(alpha=0.3) \n",
    "ax[1, 2].yaxis.grid(alpha=0.3)\n",
    "ax[1, 2].xaxis.grid(alpha=0.3)\n",
    "ax[2, 0].yaxis.grid(alpha=0.3)\n",
    "ax[2, 0].xaxis.grid(alpha=0.3) \n",
    "ax[2, 1].yaxis.grid(alpha=0.3)\n",
    "ax[2, 1].xaxis.grid(alpha=0.3) \n",
    "ax[2, 2].yaxis.grid(alpha=0.3)\n",
    "ax[2, 2].xaxis.grid(alpha=0.3) \n",
    "ax[3, 0].yaxis.grid(alpha=0.3)\n",
    "ax[3, 0].xaxis.grid(alpha=0.3) \n",
    "ax[3, 1].yaxis.grid(alpha=0.3)\n",
    "ax[3, 1].xaxis.grid(alpha=0.3) \n",
    "ax[3, 2].yaxis.grid(alpha=0.3)\n",
    "ax[3, 2].xaxis.grid(alpha=0.3) \n",
    "ax[4, 0].yaxis.grid(alpha=0.3)\n",
    "ax[4, 0].xaxis.grid(alpha=0.3) \n",
    "ax[4, 1].yaxis.grid(alpha=0.3)\n",
    "ax[4, 1].xaxis.grid(alpha=0.3) \n",
    "ax[4, 2].yaxis.grid(alpha=0.3)\n",
    "ax[4, 2].xaxis.grid(alpha=0.3)  \n",
    "\n",
    "depths = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]\n",
    "\n",
    "######################################\n",
    "plt.autoscale(enable=True, axis='x', tight=True)\n",
    "\n",
    "### depths \n",
    "## deterministic\n",
    "det_depths_line, = ax[0, 0].plot(range(np.min(depths), depths[-1]), range(np.min(depths), depths[-1]), '-', c=c[3], lw=lw)\n",
    "ax[0, 1].plot(range(np.min(depths), depths[-1]), range(np.min(depths), depths[-1]), '-', c=c[3], lw=lw)\n",
    "ax[0, 2].plot(range(np.min(depths), depths[-1]), range(np.min(depths), depths[-1]), '-', c=c[3], lw=lw)\n",
    "\n",
    "## learnt 95th\n",
    "depths_95th_line, = errorfill(depths, depth_means_MNIST, depth_stds_MNIST, color=c[2], alpha_fill=0.3,\n",
    "                          ax=ax[0, 0], lw=lw, linestyle='-')\n",
    "ax[0, 0].scatter(depths, depth_means_MNIST, c=c[2], s=s)\n",
    "\n",
    "errorfill(depths, depth_means_FashionMNIST, depth_stds_FashionMNIST, color=c[2], alpha_fill=0.3,\n",
    "                          ax=ax[0, 1], lw=lw, linestyle='-')\n",
    "ax[0, 1].scatter(depths, depth_means_FashionMNIST, c=c[2], s=s)\n",
    "\n",
    "errorfill(depths, depth_means_SVHN, depth_stds_SVHN, color=c[2], alpha_fill=0.3,\n",
    "                          ax=ax[0, 2], lw=lw, linestyle='-')\n",
    "ax[0, 2].scatter(depths, depth_means_SVHN, c=c[2], s=s)\n",
    "\n",
    "## learnt exp\n",
    "depths_exp_line, = errorfill(depths, depth_means_exp_MNIST, depth_stds_exp_MNIST, color=c[4], alpha_fill=0.3,\n",
    "                          ax=ax[0, 0], lw=lw, linestyle='-')\n",
    "ax[0, 0].scatter(depths, depth_means_exp_MNIST, c=c[4], s=s)\n",
    "\n",
    "errorfill(depths, depth_means_exp_FashionMNIST, depth_stds_exp_FashionMNIST, color=c[4], alpha_fill=0.3,\n",
    "                          ax=ax[0, 1], lw=lw, linestyle='-')\n",
    "ax[0, 1].scatter(depths, depth_means_exp_FashionMNIST, c=c[4], s=s)\n",
    "\n",
    "errorfill(depths, depth_means_exp_SVHN, depth_stds_exp_SVHN, color=c[4], alpha_fill=0.3,\n",
    "                          ax=ax[0, 2], lw=lw, linestyle='-')\n",
    "ax[0, 2].scatter(depths, depth_means_exp_SVHN, c=c[4], s=s)\n",
    "\n",
    "a_lgd = [det_depths_line, depths_exp_line, depths_95th_line]\n",
    "lgd = ax[0, 2].legend(a_lgd,\n",
    "['D', 'LDN-$\\mathbb{E}$', 'LDN-95'],\n",
    "                prop={'size': fs-1, 'weight': 'normal'}, frameon=True, markerscale=markersize, loc='upper left')\n",
    "\n",
    "\n",
    "### lls\n",
    "## determinisitc\n",
    "mnist_det_ll_errrfill, = errorfill(depths, perdd_te_ll_means_MNIST, perdd_te_ll_stds_MNIST, color=c[3], alpha_fill=0.3, ax=ax[1, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[1, 0].scatter(depths, perdd_te_ll_means_MNIST, c=c[3], s=s)\n",
    "\n",
    "fmnist_det_ll_errrfill, = errorfill(depths, perdd_te_ll_means_FashionMNIST, perdd_te_ll_stds_FashionMNIST, color=c[3], alpha_fill=0.3, ax=ax[1, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[1, 1].scatter(depths, perdd_te_ll_means_FashionMNIST, c=c[3], s=s)\n",
    "\n",
    "svhn_det_ll_errrfill, = errorfill(depths, perdd_te_ll_means_SVHN, perdd_te_ll_stds_SVHN, color=c[3], alpha_fill=0.3, ax=ax[1, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[1, 2].scatter(depths, perdd_te_ll_means_SVHN, c=c[3], s=s)\n",
    "\n",
    "## learnt 95th\n",
    "mnist_ll_errrfill, = errorfill(depths, te_ll_means_d_MNIST , te_ll_stds_d_MNIST, color=c[2], alpha_fill=0.3, ax=ax[1, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[1, 0].scatter(depths, te_ll_means_d_MNIST, c=c[2], s=s)\n",
    "\n",
    "fmnist_ll_errrfill, = errorfill(depths, te_ll_means_d_FashionMNIST , te_ll_stds_d_FashionMNIST, color=c[2], alpha_fill=0.3, ax=ax[1, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[1, 1].scatter(depths, te_ll_means_d_FashionMNIST, c=c[2], s=s)\n",
    "\n",
    "svhn_ll_errrfill, = errorfill(depths, te_ll_means_d_SVHN , te_ll_stds_d_SVHN, color=c[2], alpha_fill=0.3, ax=ax[1, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[1, 2].scatter(depths, te_ll_means_d_SVHN, c=c[2], s=s)\n",
    "\n",
    "a_lgd = [svhn_det_ll_errrfill, svhn_ll_errrfill]\n",
    "lgd = ax[1, 2].legend(a_lgd,\n",
    "['DDN', 'LDN-95'],\n",
    "                prop={'size': fs-1, 'weight': 'normal'}, frameon=True, markerscale=markersize, loc='lower right')\n",
    "\n",
    "## learnt 95th\n",
    "mnist_ll_errrfill, = errorfill(depths, te_ll_means_d_MNIST , te_ll_stds_d_MNIST, color=c[2], alpha_fill=0.3, ax=ax[2, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 0].scatter(depths, te_ll_means_d_MNIST, c=c[2], s=s)\n",
    "\n",
    "fmnist_ll_errrfill, = errorfill(depths, te_ll_means_d_FashionMNIST , te_ll_stds_d_FashionMNIST, color=c[2], alpha_fill=0.3, ax=ax[2, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 1].scatter(depths, te_ll_means_d_FashionMNIST, c=c[2], s=s)\n",
    "\n",
    "svhn_ll_errrfill, = errorfill(depths, te_ll_means_d_SVHN , te_ll_stds_d_SVHN, color=c[2], alpha_fill=0.3, ax=ax[2, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 2].scatter(depths, te_ll_means_d_SVHN, c=c[2], s=s)\n",
    "\n",
    "## full D\n",
    "mnist_full_err_errrfill, = errorfill(depths, te_ll_means_MNIST, te_ll_stds_MNIST, color=c[5], alpha_fill=0.3, ax=ax[2, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 0].scatter(depths, te_ll_means_MNIST, c=c[5], s=s)\n",
    "\n",
    "fmnist_full_ll_errrfill, = errorfill(depths, te_ll_means_FashionMNIST, te_ll_stds_FashionMNIST, color=c[5], alpha_fill=0.3, ax=ax[2, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 1].scatter(depths, te_ll_means_FashionMNIST, c=c[5], s=s)\n",
    "\n",
    "svhn_full_ll_errrfill, = errorfill(depths, te_ll_means_SVHN , te_ll_stds_SVHN, color=c[5], alpha_fill=0.3, ax=ax[2, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 2].scatter(depths, te_ll_means_SVHN, c=c[5], s=s)\n",
    "\n",
    "## learnt exp\n",
    "mnist_exp_ll_errrfill, = errorfill(depths, te_ll_means_d_exp_MNIST , te_ll_stds_d_exp_MNIST, color=c[4], alpha_fill=0.3, ax=ax[2, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 0].scatter(depths, te_ll_means_d_exp_MNIST, c=c[4], s=s)\n",
    "\n",
    "fmnist_exp_ll_errrfill, = errorfill(depths, te_ll_means_d_exp_FashionMNIST , te_ll_stds_d_exp_FashionMNIST, color=c[4], alpha_fill=0.3, ax=ax[2, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 1].scatter(depths, te_ll_means_d_exp_FashionMNIST, c=c[4], s=s)\n",
    "\n",
    "svhn_exp_ll_errrfill, = errorfill(depths, te_ll_means_d_exp_SVHN , te_ll_stds_d_exp_SVHN, color=c[4], alpha_fill=0.3, ax=ax[2, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[2, 2].scatter(depths, te_ll_means_d_exp_SVHN, c=c[4], s=s)\n",
    "\n",
    "a_lgd = [svhn_exp_ll_errrfill, svhn_ll_errrfill, svhn_full_ll_errrfill]\n",
    "lgd = ax[2, 2].legend(a_lgd,\n",
    "['LDN-$\\mathbb{E}$', 'LDN-95', 'LDN-full'],\n",
    "                prop={'size': fs-1, 'weight': 'normal'}, frameon=True, markerscale=markersize, loc='lower right')\n",
    "\n",
    "### errors\n",
    "## determinisitc\n",
    "mnist_det_err_errrfill, = errorfill(depths, perdd_te_err_means_MNIST, perdd_te_err_stds_MNIST, color=c[3], alpha_fill=0.3, ax=ax[3, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[3, 0].scatter(depths, perdd_te_err_means_MNIST, c=c[3], s=s)\n",
    "\n",
    "fmnist_det_err_errrfill, = errorfill(depths, perdd_te_err_means_FashionMNIST, perdd_te_err_stds_FashionMNIST, color=c[3], alpha_fill=0.3, ax=ax[3, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[3, 1].scatter(depths, perdd_te_err_means_FashionMNIST, c=c[3], s=s)\n",
    "\n",
    "svhn_det_err_errrfill, = errorfill(depths, perdd_te_err_means_SVHN, perdd_te_err_stds_SVHN, color=c[3], alpha_fill=0.3, ax=ax[3, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[3, 2].scatter(depths, perdd_te_err_means_SVHN, c=c[3], s=s)\n",
    "\n",
    "## learnt 95th\n",
    "mnist_err_errrfill, = errorfill(depths, te_err_means_d_MNIST , te_err_stds_d_MNIST, color=c[2], alpha_fill=0.3, ax=ax[3, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[3, 0].scatter(depths, te_err_means_d_MNIST, c=c[2], s=s)\n",
    "\n",
    "fmnist_err_errrfill, = errorfill(depths, te_err_means_d_FashionMNIST , te_err_stds_d_FashionMNIST, color=c[2], alpha_fill=0.3, ax=ax[3, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[3, 1].scatter(depths, te_err_means_d_FashionMNIST, c=c[2], s=s)\n",
    "\n",
    "svhn_err_errrfill, = errorfill(depths, te_err_means_d_SVHN , te_err_stds_d_SVHN, color=c[2], alpha_fill=0.3, ax=ax[3, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[3, 2].scatter(depths, te_err_means_d_SVHN, c=c[2], s=s)\n",
    "\n",
    "a_lgd = [svhn_det_err_errrfill, svhn_err_errrfill]\n",
    "lgd = ax[3, 2].legend(a_lgd,\n",
    "['DDN', 'LDN-95'],\n",
    "                prop={'size': fs-1, 'weight': 'normal'}, frameon=True, markerscale=markersize, loc='upper right')\n",
    "\n",
    "## learnt 95th\n",
    "mnist_err_errrfill, = errorfill(depths, te_err_means_d_MNIST , te_err_stds_d_MNIST, color=c[2], alpha_fill=0.3, ax=ax[4, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 0].scatter(depths, te_err_means_d_MNIST, c=c[2], s=s)\n",
    "\n",
    "fmnist_err_errrfill, = errorfill(depths, te_err_means_d_FashionMNIST , te_err_stds_d_FashionMNIST, color=c[2], alpha_fill=0.3, ax=ax[4, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 1].scatter(depths, te_err_means_d_FashionMNIST, c=c[2], s=s)\n",
    "\n",
    "svhn_err_errrfill, = errorfill(depths, te_err_means_d_SVHN , te_err_stds_d_SVHN, color=c[2], alpha_fill=0.3, ax=ax[4, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 2].scatter(depths, te_err_means_d_SVHN, c=c[2], s=s)\n",
    "\n",
    "## full D\n",
    "mnist_full_err_errrfill, = errorfill(depths, te_err_means_MNIST, te_err_stds_MNIST, color=c[5], alpha_fill=0.3, ax=ax[4, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 0].scatter(depths, te_err_means_MNIST, c=c[5], s=s)\n",
    "\n",
    "fmnist_full_err_errrfill, = errorfill(depths, te_err_means_FashionMNIST, te_err_stds_FashionMNIST, color=c[5], alpha_fill=0.3, ax=ax[4, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 1].scatter(depths, te_err_means_FashionMNIST, c=c[5], s=s)\n",
    "\n",
    "svhn_full_err_errrfill, = errorfill(depths, te_err_means_SVHN , te_err_stds_SVHN, color=c[5], alpha_fill=0.3, ax=ax[4, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 2].scatter(depths, te_err_means_SVHN, c=c[5], s=s)\n",
    "\n",
    "## learnt exp\n",
    "mnist_exp_err_errrfill, = errorfill(depths, te_err_means_d_exp_MNIST , te_err_stds_d_exp_MNIST, color=c[4], alpha_fill=0.3, ax=ax[4, 0], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 0].scatter(depths, te_err_means_d_exp_MNIST, c=c[4], s=s)\n",
    "\n",
    "fmnist_exp_err_errrfill, = errorfill(depths, te_err_means_d_exp_FashionMNIST , te_err_stds_d_exp_FashionMNIST, color=c[4], alpha_fill=0.3, ax=ax[4, 1], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 1].scatter(depths, te_err_means_d_exp_FashionMNIST, c=c[4], s=s)\n",
    "\n",
    "svhn_exp_err_errrfill, = errorfill(depths, te_err_means_d_exp_SVHN , te_err_stds_d_exp_SVHN, color=c[4], alpha_fill=0.3, ax=ax[4, 2], lw=lw,\n",
    "                           linestyle='-')\n",
    "ax[4, 2].scatter(depths, te_err_means_d_exp_SVHN, c=c[4], s=s)\n",
    "\n",
    "a_lgd = [svhn_exp_err_errrfill, svhn_err_errrfill, svhn_full_err_errrfill]\n",
    "lgd = ax[4, 2].legend(a_lgd,\n",
    "['LDN-$\\mathbb{E}$', 'LDN-95', 'LDN-full'],\n",
    "                prop={'size': fs-1, 'weight': 'normal'}, frameon=True, markerscale=markersize, loc='upper right')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig(plot_savedir + 'image_depth_dist_exps2.pdf', format='pdf', bbox_inches='tight')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Calibration plots"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from src.utils import np_get_one_hot\n",
    "\n",
    "def plot_calibration_curve_probs(probs, y_test, n_bins=10, dpi=200, grid_alph=0.3, yax=True, title=None, ax=None, xax=True):\n",
    "    \"\"\"Plot a reliability diagram (mean +/- std over runs) and report ECE in the title.\n",
    "\n",
    "    probs:  (n_runs, n_points, n_classes) array of predicted class probabilities.\n",
    "    y_test: integer class labels for the n_points test inputs.\n",
    "    Returns the matplotlib bar container so callers can build legends from it.\n",
    "    \"\"\"\n",
    "    bin_limits = np.linspace(0, 1, n_bins + 1)\n",
    "\n",
    "    bin_step = bin_limits[1] - bin_limits[0]\n",
    "    bin_centers = bin_limits[:-1] + bin_step / 2\n",
    "\n",
    "    bin_probs = []\n",
    "    bin_counts_list = []\n",
    "    for i in range(probs.shape[0]):\n",
    "        all_preds = probs[i]\n",
    "        pred_class = np.argmax(all_preds, axis=1)\n",
    "\n",
    "        expanded_preds = np.reshape(all_preds, -1)\n",
    "        # These reshapes on the one hot vectors count every possible class as a different prediction\n",
    "        pred_class_OH_expand = np.reshape(np_get_one_hot(pred_class, all_preds.shape[1]), -1)\n",
    "        targets_class_OH_expand = np.reshape(np_get_one_hot(y_test.reshape(-1, 1).astype(int), all_preds.shape[1]), -1)\n",
    "        correct_vec = (targets_class_OH_expand * (pred_class_OH_expand == targets_class_OH_expand)).astype(int)\n",
    "\n",
    "        bin_idxs = np.digitize(expanded_preds, bin_limits, right=True) - 1\n",
    "\n",
    "        bin_counts = np.zeros(n_bins)\n",
    "        bin_corrects = np.zeros(n_bins)\n",
    "        for nbin in range(n_bins):\n",
    "            bin_counts[nbin] = np.sum((bin_idxs == nbin).astype(int))\n",
    "            bin_corrects[nbin] = np.sum(correct_vec[bin_idxs == nbin])\n",
    "\n",
    "        # Guard empty bins: 0/0 would otherwise poison the ECE below with NaNs.\n",
    "        bin_probs.append(bin_corrects / np.maximum(bin_counts, 1))\n",
    "        bin_counts_list.append(bin_counts)\n",
    "\n",
    "    bin_probs = np.array(bin_probs)  # (n_runs, n_bins)\n",
    "\n",
    "    bin_counts = np.array(bin_counts_list)  # (n_runs, n_bins)\n",
    "    bin_totals = np.sum(bin_counts, axis=1, keepdims=True)  # (n_runs, 1)\n",
    "    # Accuracy vs the bins' upper limits. This was hardcoded as\n",
    "    # np.arange(0.1, 1.1, 0.1), which is only correct for n_bins=10;\n",
    "    # bin_limits[1:] is identical in that case and valid for any n_bins.\n",
    "    bin_errs = bin_probs - bin_limits[1:]  # (n_runs, n_bins)\n",
    "    eces = np.sum(bin_counts / bin_totals * np.abs(bin_errs), axis=1)  # (n_runs,)\n",
    "    ece_mean = np.mean(eces * 100)\n",
    "    ece_std = np.std(eces * 100)\n",
    "\n",
    "    bin_prob_means = np.mean(bin_probs, axis=0)\n",
    "    bin_prob_stds = np.std(bin_probs, axis=0)\n",
    "\n",
    "    if ax is None:\n",
    "        plt.figure(dpi=dpi)\n",
    "        ax = plt.gca()\n",
    "    bar_ret = ax.bar(bin_centers, bin_prob_means, 1 / n_bins, edgecolor='k', alpha=0.9, yerr=bin_prob_stds)\n",
    "    # Diagonal = perfect calibration.\n",
    "    ax.plot(np.linspace(0, 1, 20), np.linspace(0, 1, 20), '--', c='k')\n",
    "\n",
    "    if xax:\n",
    "        ax.set_xlabel('predicted probability')\n",
    "    if yax:\n",
    "        ax.set_ylabel('correct proportion')\n",
    "\n",
    "    if title is not None:\n",
    "        ax.set_title(title + \", $ %6.2f \\pm %6.3f $\" % (ece_mean, ece_std))\n",
    "    ax.set_xticks(bin_limits[::2])\n",
    "    ax.yaxis.grid(alpha=grid_alph)\n",
    "    ax.xaxis.grid(alpha=grid_alph)\n",
    "    ax.set_ylim((0, 1))\n",
    "    # For 2-class problems only show the upper half of the confidence range.\n",
    "    if probs.shape[2] == 2:\n",
    "        ax.set_xlim((0.5, 1))\n",
    "    else:\n",
    "        ax.set_xlim((0, 1))\n",
    "\n",
    "    return bar_ret"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Maximum network depth used for the calibration experiments below.\n",
    "n_layers = 50"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate data for SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# SVHN: 3-channel images, 10 classes.\n",
    "input_chan = 3\n",
    "output_dim = 10\n",
    "outer_width = 64\n",
    "inner_width = 32\n",
    "\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "\n",
    "trainset = datasets.SVHN(root='../data', split=\"train\", download=True, transform=transform_train)\n",
    "valset = datasets.SVHN(root='../data', split=\"test\", download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only helps when copying batches to a GPU, so tie it to `cuda`\n",
    "# instead of duplicating the DataLoader construction in an if/else.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=cuda,\n",
    "                                          num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=cuda,\n",
    "                                        num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LDN: learnt-depth networks. Collect test-set predictions from the four\n",
    "# trained versions, both truncated at the chosen depth and fully integrated.\n",
    "prior_probs = 0.85 ** (1 + np.arange(n_layers + 1))\n",
    "prior_probs = prior_probs / prior_probs.sum()\n",
    "prob_model = variational_categorical(n_layers, prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                inner_width, n_layers, prob_model)  \n",
    "N_train = len(trainset)\n",
    "lr = 1e-1\n",
    "net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "our_probs_SVHN = []\n",
    "full_int_probs_SVHN = []\n",
    "\n",
    "for i in [0, 1, 2, 3]:\n",
    "    print(i)\n",
    "    our_probs_SVHN.append([])\n",
    "    full_int_probs_SVHN.append([])\n",
    "\n",
    "    net.load('../saves/logs/CNN_BNN_SVHN_cat/max_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "    # Truncation depth: first depth whose posterior mass is within 95% of the mode.\n",
    "    prbs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "    cutoff = np.max(prbs)*0.95\n",
    "    chosen_depth = np.argmax(prbs > cutoff)\n",
    "\n",
    "\n",
    "    ys_SVHN = []\n",
    "\n",
    "    for x, y in valloader:\n",
    "        ys_SVHN.append(y.data.numpy())\n",
    "\n",
    "        full_int_probs_SVHN[-1].append(net.sample_predict(x).sum(dim=0).data.cpu().numpy())\n",
    "        our_probs_SVHN[-1].append(net.partial_predict(x, depth=chosen_depth).sum(dim=0).data.cpu().numpy())\n",
    "\n",
    "    our_probs_SVHN[-1] = np.concatenate(our_probs_SVHN[-1])\n",
    "    full_int_probs_SVHN[-1] = np.concatenate(full_int_probs_SVHN[-1])\n",
    "    ys_SVHN = np.concatenate(ys_SVHN)\n",
    "\n",
    "# DDN: deterministic full-depth baseline #######\n",
    "\n",
    "probs = np.zeros(n_layers + 1)\n",
    "probs[-1] = 1\n",
    "\n",
    "# Was cuda=True, which breaks on CPU-only machines; respect the cuda flag.\n",
    "prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=cuda)\n",
    "model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                inner_width, n_layers, prob_model) \n",
    "net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "det_probs_SVHN = []\n",
    "for i in [0, 1, 2, 3]:\n",
    "    net.load('../saves/logs/CNN_BNN_SVHN_deterministic_cat/deterministic_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "         + '/theta_best.dat')\n",
    "\n",
    "    det_probs_SVHN.append([])\n",
    "\n",
    "    for x, _ in valloader:\n",
    "        det_probs_SVHN[-1].append(net.sample_predict(x).sum(dim=0).data.cpu().numpy())\n",
    "\n",
    "    det_probs_SVHN[-1] = np.concatenate(det_probs_SVHN[-1])\n",
    "\n",
    "\n",
    "det_probs_SVHN = np.array(det_probs_SVHN)\n",
    "our_probs_SVHN = np.array(our_probs_SVHN)\n",
    "full_int_probs_SVHN = np.array(full_int_probs_SVHN)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save data for SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Uncomment to (re)generate the cached prediction file loaded two cells below.\n",
    "# to_save = (\n",
    "#     det_probs_SVHN,\n",
    "#     our_probs_SVHN,\n",
    "#     full_int_probs_SVHN,\n",
    "#     ys_SVHN\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/spiral_callibration_SVHN.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load data for SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load cached SVHN predictions so plots can be regenerated without GPUs/checkpoints.\n",
    "# NOTE: pickle.load executes arbitrary code -- only open caches you created yourself.\n",
    "with open('../saves/paper_plots/spiral_callibration_SVHN.pkl','rb') as f:\n",
    "     (\n",
    "        det_probs_SVHN,\n",
    "        our_probs_SVHN,\n",
    "        full_int_probs_SVHN,\n",
    "        ys_SVHN\n",
    "    ) = pickle.load(f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Gen plots for SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dpi=500\n",
    "\n",
    "# Reliability diagrams for SVHN: deterministic vs learnt-depth (truncated / full).\n",
    "# `fix` was a typo for `fig`; raw strings avoid the invalid '\\i' escape warning\n",
    "# while producing the exact same LaTeX titles.\n",
    "fig, ax = plt.subplots(nrows=1, ncols=3, dpi=dpi,\n",
    "                       figsize=(1*text_width, text_width*0.45), sharey='row')\n",
    "\n",
    "for a in ax:\n",
    "    a.set_aspect('equal')\n",
    "\n",
    "plot_calibration_curve_probs(det_probs_SVHN, ys_SVHN, n_bins=10, dpi=200, grid_alph=0.3, yax=True, title='DDN', ax=ax[0], xax=False)\n",
    "plot_calibration_curve_probs(our_probs_SVHN, ys_SVHN, n_bins=10, dpi=200, grid_alph=0.3, yax=False, title=r'LDN, $d \\in [0, d_{opt}]$', ax=ax[1])\n",
    "plot_calibration_curve_probs(full_int_probs_SVHN, ys_SVHN, n_bins=10, dpi=200, grid_alph=0.3, yax=False, title=r'LDN, $d \\in [0, D]$', ax=ax[2], xax=False)\n",
    "\n",
    "plt.tight_layout()\n",
    "\n",
    "plt.savefig(plot_savedir + 'callibration_SVHN.pdf', format='pdf', bbox_inches='tight')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data for MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# MNIST: single-channel images, 10 classes.\n",
    "input_chan = 1\n",
    "output_dim = 10\n",
    "outer_width = 64\n",
    "inner_width = 32\n",
    "\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "\n",
    "trainset = datasets.MNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.MNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only helps when copying batches to a GPU, so tie it to `cuda`\n",
    "# instead of duplicating the DataLoader construction in an if/else.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=cuda,\n",
    "                                          num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=cuda,\n",
    "                                        num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LDN: learnt-depth networks. Collect test-set predictions from the four\n",
    "# trained versions, both truncated at the chosen depth and fully integrated.\n",
    "prior_probs = 0.85 ** (1 + np.arange(n_layers + 1))\n",
    "prior_probs = prior_probs / prior_probs.sum()\n",
    "prob_model = variational_categorical(n_layers, prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                inner_width, n_layers, prob_model)  \n",
    "N_train = len(trainset)\n",
    "lr = 1e-1\n",
    "net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "our_probs_MNIST = []\n",
    "full_int_probs_MNIST = []\n",
    "\n",
    "for i in [0, 1, 2, 3]:\n",
    "    our_probs_MNIST.append([])\n",
    "    full_int_probs_MNIST.append([])\n",
    "\n",
    "    net.load('../saves/logs/CNN_BNN_MNIST_cat/max_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "    # Truncation depth: first depth whose posterior mass is within 95% of the mode.\n",
    "    prbs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "    cutoff = np.max(prbs)*0.95\n",
    "    chosen_depth = np.argmax(prbs > cutoff)\n",
    "\n",
    "\n",
    "    ys_MNIST = []\n",
    "\n",
    "    for x, y in valloader:\n",
    "        ys_MNIST.append(y.data.numpy())\n",
    "\n",
    "        full_int_probs_MNIST[-1].append(net.sample_predict(x).sum(dim=0).data.cpu().numpy())\n",
    "        our_probs_MNIST[-1].append(net.partial_predict(x, depth=chosen_depth).sum(dim=0).data.cpu().numpy())\n",
    "\n",
    "    our_probs_MNIST[-1] = np.concatenate(our_probs_MNIST[-1])\n",
    "    full_int_probs_MNIST[-1] = np.concatenate(full_int_probs_MNIST[-1])\n",
    "    ys_MNIST = np.concatenate(ys_MNIST)\n",
    "\n",
    "# DDN: deterministic full-depth baseline #######\n",
    "\n",
    "probs = np.zeros(n_layers + 1)\n",
    "probs[-1] = 1\n",
    "\n",
    "# Was cuda=True, which breaks on CPU-only machines; respect the cuda flag.\n",
    "prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=cuda)\n",
    "model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                inner_width, n_layers, prob_model) \n",
    "net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "det_probs_MNIST = []\n",
    "for i in [0, 1, 2, 3]:\n",
    "    net.load('../saves/logs/CNN_BNN_MNIST_deterministic_cat/deterministic_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "         + '/theta_best.dat')\n",
    "\n",
    "    det_probs_MNIST.append([])\n",
    "\n",
    "    for x, _ in valloader:\n",
    "        det_probs_MNIST[-1].append(net.sample_predict(x).sum(dim=0).data.cpu().numpy())\n",
    "\n",
    "    det_probs_MNIST[-1] = np.concatenate(det_probs_MNIST[-1])\n",
    "\n",
    "\n",
    "det_probs_MNIST = np.array(det_probs_MNIST)\n",
    "our_probs_MNIST = np.array(our_probs_MNIST)\n",
    "full_int_probs_MNIST = np.array(full_int_probs_MNIST)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save plot data for MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Uncomment to (re)generate the cached prediction file loaded two cells below.\n",
    "# to_save = (\n",
    "#     det_probs_MNIST,\n",
    "#     our_probs_MNIST,\n",
    "#     full_int_probs_MNIST,\n",
    "#     ys_MNIST\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/spiral_callibration_MNIST.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load plot data for MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load cached MNIST predictions so plots can be regenerated without GPUs/checkpoints.\n",
    "# NOTE: pickle.load executes arbitrary code -- only open caches you created yourself.\n",
    "with open('../saves/paper_plots/spiral_callibration_MNIST.pkl','rb') as f:\n",
    "     (\n",
    "        det_probs_MNIST,\n",
    "        our_probs_MNIST,\n",
    "        full_int_probs_MNIST,\n",
    "        ys_MNIST\n",
    "    ) = pickle.load(f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Gen plots for MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dpi=800\n",
    "\n",
    "# Reliability diagrams for MNIST: deterministic vs learnt-depth (truncated / full).\n",
    "# `fix` was a typo for `fig`; raw strings avoid the invalid '\\i' escape warning\n",
    "# while producing the exact same LaTeX titles.\n",
    "fig, ax = plt.subplots(nrows=1, ncols=3, dpi=dpi,\n",
    "                       figsize=(1*text_width, text_width*0.45), sharey='row')\n",
    "\n",
    "for a in ax:\n",
    "    a.set_aspect('equal')\n",
    "\n",
    "plot_calibration_curve_probs(det_probs_MNIST, ys_MNIST, n_bins=10, dpi=200, grid_alph=0.3, yax=True, title='DDN', ax=ax[0], xax=False)\n",
    "plot_calibration_curve_probs(our_probs_MNIST, ys_MNIST, n_bins=10, dpi=200, grid_alph=0.3, yax=False, title=r'LDN, $d \\in [0, d_{opt}]$', ax=ax[1])\n",
    "plot_calibration_curve_probs(full_int_probs_MNIST, ys_MNIST, n_bins=10, dpi=200, grid_alph=0.3, yax=False, title=r'LDN, $d \\in [0, D]$', ax=ax[2], xax=False)\n",
    "\n",
    "plt.tight_layout()\n",
    "\n",
    "plt.savefig(plot_savedir + 'callibration_MNIST.pdf', format='pdf', bbox_inches='tight')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data for FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# FashionMNIST: single-channel images, 10 classes.\n",
    "input_chan = 1\n",
    "output_dim = 10\n",
    "outer_width = 64\n",
    "inner_width = 32\n",
    "\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.2860,), std=(0.3530,))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.2860,), std=(0.3530,))\n",
    "])\n",
    "\n",
    "trainset = datasets.FashionMNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.FashionMNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only helps when copying batches to a GPU, so tie it to `cuda`\n",
    "# instead of duplicating the DataLoader construction in an if/else.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=cuda,\n",
    "                                          num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=cuda,\n",
    "                                        num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LDN: learnt-depth networks. Collect test-set predictions from the four\n",
    "# trained versions, both truncated at the chosen depth and fully integrated.\n",
    "\n",
    "prior_probs = 0.85 ** (1 + np.arange(n_layers + 1))\n",
    "prior_probs = prior_probs / prior_probs.sum()\n",
    "prob_model = variational_categorical(n_layers, prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                inner_width, n_layers, prob_model)  \n",
    "N_train = len(trainset)\n",
    "lr = 1e-1\n",
    "net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "our_probs_FashionMNIST = []\n",
    "full_int_probs_FashionMNIST = []\n",
    "\n",
    "for i in [0, 1, 2, 3]:\n",
    "    print(i)\n",
    "    our_probs_FashionMNIST.append([])\n",
    "    full_int_probs_FashionMNIST.append([])\n",
    "\n",
    "    net.load('../saves/logs/CNN_BNN_FashionMNIST_cat/max_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "    # Truncation depth: first depth whose posterior mass is within 95% of the mode.\n",
    "    prbs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "    cutoff = np.max(prbs)*0.95\n",
    "    chosen_depth = np.argmax(prbs > cutoff)\n",
    "\n",
    "\n",
    "    ys_FashionMNIST = []\n",
    "\n",
    "    for x, y in valloader:\n",
    "        ys_FashionMNIST.append(y.data.numpy())\n",
    "\n",
    "        full_int_probs_FashionMNIST[-1].append(net.sample_predict(x).sum(dim=0).data.cpu().numpy())\n",
    "        our_probs_FashionMNIST[-1].append(net.partial_predict(x, depth=chosen_depth).sum(dim=0).data.cpu().numpy())\n",
    "\n",
    "    our_probs_FashionMNIST[-1] = np.concatenate(our_probs_FashionMNIST[-1])\n",
    "    full_int_probs_FashionMNIST[-1] = np.concatenate(full_int_probs_FashionMNIST[-1])\n",
    "    ys_FashionMNIST = np.concatenate(ys_FashionMNIST)\n",
    "\n",
    "# DDN: deterministic full-depth baseline\n",
    "\n",
    "probs = np.zeros(n_layers + 1)\n",
    "probs[-1] = 1\n",
    "\n",
    "# Was cuda=True, which breaks on CPU-only machines; respect the cuda flag.\n",
    "prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=cuda)\n",
    "model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                inner_width, n_layers, prob_model) \n",
    "net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "det_probs_FashionMNIST = []\n",
    "for i in [0, 1, 2, 3]:\n",
    "    net.load('../saves/logs/CNN_BNN_FashionMNIST_deterministic_cat/deterministic_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "         + '/theta_best.dat')\n",
    "\n",
    "    det_probs_FashionMNIST.append([])\n",
    "\n",
    "    for x, _ in valloader:\n",
    "        det_probs_FashionMNIST[-1].append(net.sample_predict(x).sum(dim=0).data.cpu().numpy())\n",
    "\n",
    "    det_probs_FashionMNIST[-1] = np.concatenate(det_probs_FashionMNIST[-1])\n",
    "\n",
    "\n",
    "det_probs_FashionMNIST = np.array(det_probs_FashionMNIST)\n",
    "our_probs_FashionMNIST = np.array(our_probs_FashionMNIST)\n",
    "full_int_probs_FashionMNIST = np.array(full_int_probs_FashionMNIST)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save plot data for FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Uncomment to (re)generate the cached prediction file loaded two cells below.\n",
    "# to_save = (\n",
    "#     det_probs_FashionMNIST,\n",
    "#     our_probs_FashionMNIST,\n",
    "#     full_int_probs_FashionMNIST,\n",
    "#     ys_FashionMNIST\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/spiral_callibration_FashionMNIST.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load plot data for FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load cached FashionMNIST predictions so plots can be regenerated without GPUs/checkpoints.\n",
    "# NOTE: pickle.load executes arbitrary code -- only open caches you created yourself.\n",
    "with open('../saves/paper_plots/spiral_callibration_FashionMNIST.pkl','rb') as f:\n",
    "     (\n",
    "        det_probs_FashionMNIST,\n",
    "        our_probs_FashionMNIST,\n",
    "        full_int_probs_FashionMNIST,\n",
    "        ys_FashionMNIST\n",
    "    ) = pickle.load(f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plots for FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dpi=800\n",
    "\n",
    "# Reliability diagrams for FashionMNIST: deterministic vs learnt-depth (truncated / full).\n",
    "# `fix` was a typo for `fig`; raw strings avoid the invalid '\\i' escape warning\n",
    "# while producing the exact same LaTeX titles.\n",
    "fig, ax = plt.subplots(nrows=1, ncols=3, dpi=dpi,\n",
    "                       figsize=(1*text_width, text_width*0.45), sharey='row')\n",
    "\n",
    "for a in ax:\n",
    "    a.set_aspect('equal')\n",
    "\n",
    "plot_calibration_curve_probs(det_probs_FashionMNIST, ys_FashionMNIST, n_bins=10, dpi=200, grid_alph=0.3, yax=True, title='DDN', ax=ax[0], xax=False)\n",
    "plot_calibration_curve_probs(our_probs_FashionMNIST, ys_FashionMNIST, n_bins=10, dpi=200, grid_alph=0.3, yax=False, title=r'LDN, $d \\in [0, d_{opt}]$', ax=ax[1])\n",
    "plot_calibration_curve_probs(full_int_probs_FashionMNIST, ys_FashionMNIST, n_bins=10, dpi=200, grid_alph=0.3, yax=False, title=r'LDN, $d \\in [0, D]$', ax=ax[2], xax=False)\n",
    "\n",
    "plt.tight_layout()\n",
    "\n",
    "plt.savefig(plot_savedir + 'callibration_FashionMNIST.pdf', format='pdf', bbox_inches='tight')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Timing experiments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# `time` is already imported at the top of the notebook; kept here so the\n",
    "# timing section can also be run on its own.\n",
    "import time"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generate plot data for SVHN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# SVHN: 3-channel images, 10 classes.\n",
    "input_chan = 3\n",
    "output_dim = 10\n",
    "outer_width = 64\n",
    "inner_width = 32\n",
    "\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.431, 0.430, 0.446), (0.197, 0.198, 0.199))\n",
    "])\n",
    "\n",
    "trainset = datasets.SVHN(root='../data', split=\"train\", download=True, transform=transform_train)\n",
    "valset = datasets.SVHN(root='../data', split=\"test\", download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "# pin_memory only helps when copying batches to a GPU, so tie it to `cuda`\n",
    "# instead of duplicating the DataLoader construction in an if/else.\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=cuda,\n",
    "                                          num_workers=3)\n",
    "valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=cuda,\n",
    "                                        num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def time_valset_passes(predict_fn, n_reps=10):\n",
    "    \"\"\"Wall-clock seconds for n_reps full prediction passes over valloader.\n",
    "\n",
    "    The class prediction is computed (and discarded) to mimic a realistic\n",
    "    inference workload. The original loop body referenced an undefined `y`\n",
    "    (`y = y.cuda()`); the labels are not needed for timing, so that is dropped.\n",
    "    \"\"\"\n",
    "    times = []\n",
    "    for _ in range(n_reps):\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            prbs = predict_fn(x).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "        times.append(time.time() - tic)\n",
    "    return times\n",
    "\n",
    "valset_partial_pred_times_SVHN = []\n",
    "valset_sample_pred_times_SVHN = []\n",
    "valset_det_pred_times_SVHN = []\n",
    "\n",
    "for n_layers in [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]:\n",
    "    # LDN for this maximum depth.\n",
    "    prior_probs = 0.85 ** (1 + np.arange(n_layers + 1))\n",
    "    prior_probs = prior_probs / prior_probs.sum()\n",
    "    prob_model = variational_categorical(n_layers, prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "    model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                        inner_width, n_layers, prob_model)  \n",
    "    N_train = len(trainset)\n",
    "    lr = 1e-1\n",
    "    net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "    # One row per depth: each row holds the list of repeat-timings per version,\n",
    "    # so the axis=1 mean below is well defined (the original reduced a 1-D list\n",
    "    # with axis=1, which raises, and only for the last version).\n",
    "    partial_rows = []\n",
    "    sample_rows = []\n",
    "    det_rows = []\n",
    "\n",
    "    for i in range(4):\n",
    "        net.load('../saves/logs/CNN_BNN_SVHN_cat/max_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "        # Truncation depth: first depth with posterior mass within 95% of the mode.\n",
    "        probs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "        cutoff = np.max(probs)*0.95\n",
    "        chosen_depth = np.argmax(probs > cutoff)\n",
    "\n",
    "        partial_rows.append(time_valset_passes(lambda x: net.partial_predict(x, depth=chosen_depth)))\n",
    "        sample_rows.append(time_valset_passes(net.sample_predict))\n",
    "\n",
    "        # DDN baseline. Build a *separate* net so the LDN `net` above is not\n",
    "        # clobbered (the original reused `net`, so subsequent versions loaded\n",
    "        # LDN checkpoints into the deterministic model). Also cuda=cuda, not\n",
    "        # cuda=True, so the cell runs on CPU-only machines.\n",
    "        det_probs = np.zeros(n_layers + 1)\n",
    "        det_probs[-1] = 1\n",
    "\n",
    "        det_prob_model = fixed_probs(n_layers, probs=det_probs, distribution_name='cat', cuda=cuda)\n",
    "        det_model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                        inner_width, n_layers, det_prob_model) \n",
    "        det_net = MF_BNN_cat(det_model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "        det_net.load('../saves/logs/CNN_BNN_SVHN_deterministic_cat/deterministic_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "        det_rows.append(time_valset_passes(det_net.sample_predict))\n",
    "\n",
    "    # Mean over the 10 repeats -> one mean time per version for this depth.\n",
    "    valset_partial_pred_times_SVHN.append(np.mean(np.array(partial_rows), axis=1))\n",
    "    valset_sample_pred_times_SVHN.append(np.mean(np.array(sample_rows), axis=1))\n",
    "    valset_det_pred_times_SVHN.append(np.mean(np.array(det_rows), axis=1))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Save plot data\n",
    "\n",
    "# to_save = (\n",
    "#     valset_partial_pred_times_SVHN,\n",
    "#     valset_sample_pred_times_SVHN,\n",
    "#     valset_det_pred_times_SVHN\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/times_SVHN.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Generate plot data for MNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "input_chan = 1\n",
    "output_dim = 10\n",
    "outer_width = 64\n",
    "inner_width = 32\n",
    "\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n",
    "])\n",
    "\n",
    "trainset = datasets.MNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.MNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "if cuda:\n",
    "    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=True,\n",
    "                                                num_workers=3)\n",
    "    valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=True,\n",
    "                                            num_workers=3)\n",
    "\n",
    "else:\n",
    "    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=False,\n",
    "                                                num_workers=3)\n",
    "    valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=False,\n",
    "                                            num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "valset_partial_pred_times_MNIST = []\n",
    "valset_sample_pred_times_MNIST = []\n",
    "valset_det_pred_times_MNIST = []\n",
    "\n",
    "for n_layers in [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]:\n",
    "    prior_probs = 0.85 ** (1 + np.arange(n_layers + 1))\n",
    "    prior_probs = prior_probs / prior_probs.sum()\n",
    "    prob_model = variational_categorical(n_layers, prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "    model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                        inner_width, n_layers, prob_model)  \n",
    "    N_train = len(trainset)\n",
    "    lr = 1e-1\n",
    "    net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "    valset_partial_pred_times_MNIST.append([])\n",
    "    valset_sample_pred_times_MNIST.append([])\n",
    "    valset_det_pred_times_MNIST.append([])\n",
    "\n",
    "    for i in range(4):\n",
    "        net.load('../saves/logs/CNN_BNN_MNIST_cat/max_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "        probs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "        cuttoff = np.max(probs)*0.95\n",
    "        # depth_exp = np.sum(probs * np.arange(net.model.n_layers + 1))\n",
    "        chosen_depth = np.argmax(probs > cuttoff)\n",
    "\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            if cuda:\n",
    "                y = y.cuda()\n",
    "\n",
    "            prbs = net.partial_predict(x, depth=chosen_depth).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "\n",
    "        toc = time.time()\n",
    "        valset_partial_pred_times_MNIST[-1].append(toc - tic)\n",
    "\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            if cuda:\n",
    "                y = y.cuda()\n",
    "\n",
    "            prbs = net.sample_predict(x).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "\n",
    "        toc = time.time()\n",
    "        valset_sample_pred_times_MNIST[-1].append(toc - tic)\n",
    "\n",
    "        probs = np.zeros(n_layers + 1)\n",
    "        probs[-1] = 1\n",
    "\n",
    "        prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=True)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                        inner_width, n_layers, prob_model) \n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "        \n",
    "        net.load('../saves/logs/CNN_BNN_MNIST_deterministic_cat/deterministic_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            if cuda:\n",
    "                y = y.cuda()\n",
    "\n",
    "            prbs = net.sample_predict(x).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "\n",
    "        toc = time.time()\n",
    "        valset_det_pred_times_MNIST[-1].append(toc - tic)\n",
    "\n",
    "    valset_partial_pred_times_MNIST[-1] = np.array(valset_partial_pred_times_MNIST[-1])\n",
    "    valset_sample_pred_times_MNIST[-1] = np.array(valset_sample_pred_times_MNIST[-1])\n",
    "    valset_det_pred_times_MNIST[-1] = np.array(valset_det_pred_times_MNIST[-1])\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# to_save = (\n",
    "#     valset_partial_pred_times_MNIST,\n",
    "#     valset_sample_pred_times_MNIST,\n",
    "#     valset_det_pred_times_MNIST\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/times_MNIST.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Generate plot data for FashionMNIST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "input_chan = 1\n",
    "output_dim = 10\n",
    "outer_width = 64\n",
    "inner_width = 32\n",
    "\n",
    "transform_train = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.2860,), std=(0.3530,))\n",
    "])\n",
    "\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=(0.2860,), std=(0.3530,))\n",
    "])\n",
    "\n",
    "trainset = datasets.FashionMNIST(root='../data', train=True, download=True, transform=transform_train)\n",
    "valset = datasets.FashionMNIST(root='../data', train=False, download=True, transform=transform_test)\n",
    "\n",
    "batch_size = 128\n",
    "if cuda:\n",
    "    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=True,\n",
    "                                                num_workers=3)\n",
    "    valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=True,\n",
    "                                            num_workers=3)\n",
    "\n",
    "else:\n",
    "    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=False,\n",
    "                                                num_workers=3)\n",
    "    valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=False,\n",
    "                                            num_workers=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "valset_partial_pred_times_FashionMNIST = []\n",
    "valset_sample_pred_times_FashionMNIST = []\n",
    "valset_det_pred_times_FashionMNIST = []\n",
    "\n",
    "for n_layers in [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]:\n",
    "    prior_probs = 0.85 ** (1 + np.arange(n_layers + 1))\n",
    "    prior_probs = prior_probs / prior_probs.sum()\n",
    "    prob_model = variational_categorical(n_layers, prior_probs, temp=0.1, eps=1e-10, cuda=cuda)\n",
    "    model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                        inner_width, n_layers, prob_model)  \n",
    "    N_train = len(trainset)\n",
    "    lr = 1e-1\n",
    "    net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None) \n",
    "\n",
    "    valset_partial_pred_times_FashionMNIST.append([])\n",
    "    valset_sample_pred_times_FashionMNIST.append([])\n",
    "    valset_det_pred_times_FashionMNIST.append([])\n",
    "\n",
    "    for i in range(4):\n",
    "        net.load('../saves/logs/CNN_BNN_FashionMNIST_cat/max_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "        probs = net.model.prob_model.get_q_probs().data.cpu().numpy()\n",
    "        cuttoff = np.max(probs)*0.95\n",
    "        # depth_exp = np.sum(probs * np.arange(net.model.n_layers + 1))\n",
    "        chosen_depth = np.argmax(probs > cuttoff)\n",
    "\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            if cuda:\n",
    "                y = y.cuda()\n",
    "\n",
    "            prbs = net.partial_predict(x, depth=chosen_depth).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "\n",
    "        toc = time.time()\n",
    "        valset_partial_pred_times_FashionMNIST[-1].append(toc - tic)\n",
    "\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            if cuda:\n",
    "                y = y.cuda()\n",
    "\n",
    "            prbs = net.sample_predict(x).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "\n",
    "        toc = time.time()\n",
    "        valset_sample_pred_times_FashionMNIST[-1].append(toc - tic)\n",
    "\n",
    "        probs = np.zeros(n_layers + 1)\n",
    "        probs[-1] = 1\n",
    "\n",
    "        prob_model = fixed_probs(n_layers, probs=probs, distribution_name='cat', cuda=True)\n",
    "        model = arq_uncert_conv2d_resnet(input_chan, output_dim, outer_width, \n",
    "                                        inner_width, n_layers, prob_model) \n",
    "        net = MF_BNN_cat(model, N_train, lr=lr, cuda=cuda, schedule=None)\n",
    "\n",
    "        \n",
    "        net.load('../saves/logs/CNN_BNN_FashionMNIST_deterministic_cat/deterministic_depth_scan/' + str(n_layers) + '/version_%d/models' % i \n",
    "            + '/theta_best.dat')\n",
    "\n",
    "        tic = time.time()\n",
    "        for x, _ in valloader:\n",
    "            if cuda:\n",
    "                y = y.cuda()\n",
    "\n",
    "            prbs = net.sample_predict(x).sum(dim=0)\n",
    "            pred = prbs.max(dim=1, keepdim=False)[1]\n",
    "\n",
    "        toc = time.time()\n",
    "        valset_det_pred_times_FashionMNIST[-1].append(toc - tic)\n",
    "\n",
    "    valset_partial_pred_times_FashionMNIST[-1] = np.array(valset_partial_pred_times_FashionMNIST[-1])\n",
    "    valset_sample_pred_times_FashionMNIST[-1] = np.array(valset_sample_pred_times_FashionMNIST[-1])\n",
    "    valset_det_pred_times_FashionMNIST[-1] = np.array(valset_det_pred_times_FashionMNIST[-1])\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# to_save = (\n",
    "#     valset_partial_pred_times_FashionMNIST,\n",
    "#     valset_sample_pred_times_FashionMNIST,\n",
    "#     valset_det_pred_times_FashionMNIST\n",
    "# )\n",
    "\n",
    "\n",
    "# with open('../saves/paper_plots/times_FashionMNIST.pkl','wb') as f:\n",
    "#      pickle.dump(to_save, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Load plot data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('../saves/paper_plots/times_SVHN.pkl','rb') as f:\n",
    "     (\n",
    "        valset_partial_pred_times_SVHN,\n",
    "        valset_sample_pred_times_SVHN,\n",
    "        valset_det_pred_times_SVHN\n",
    "     ) = pickle.load(f)\n",
    "\n",
    "with open('../saves/paper_plots/times_MNIST.pkl','rb') as f:\n",
    "     (\n",
    "        valset_partial_pred_times_MNIST,\n",
    "        valset_sample_pred_times_MNIST,\n",
    "        valset_det_pred_times_MNIST\n",
    "     ) = pickle.load(f)\n",
    "\n",
    "with open('../saves/paper_plots/times_FashionMNIST.pkl','rb') as f:\n",
    "     (\n",
    "        valset_partial_pred_times_FashionMNIST,\n",
    "        valset_sample_pred_times_FashionMNIST,\n",
    "        valset_det_pred_times_FashionMNIST\n",
    "     ) = pickle.load(f)"
   ]
  },
  {
   "cell_type": "markdown",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Generate plots"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "speedups_SVHN = 1 - np.array(valset_partial_pred_times_SVHN) / np.array(valset_det_pred_times_SVHN)\n",
    "speedups_MNIST = 1 - np.array(valset_partial_pred_times_MNIST) / np.array(valset_det_pred_times_MNIST)\n",
    "speedups_FashionMNIST = 1 - np.array(valset_partial_pred_times_FashionMNIST) / np.array(valset_det_pred_times_FashionMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mean_speedup_SVHN, std_speedup_SVHN = get_run_mean_std(speedups_SVHN * 100)\n",
    "mean_speedup_MNIST, std_speedup_MNIST = get_run_mean_std(speedups_MNIST * 100)\n",
    "mean_speedup_FashionMNIST, std_speedup_FashionMNIST = get_run_mean_std(speedups_FashionMNIST * 100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dpi=800\n",
    "\n",
    "fig, ax = plt.subplots(nrows=1, ncols=3, dpi=dpi, figsize=(1.1*text_width, 1.8))\n",
    "\n",
    "depths = [str(x) for x in [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100]]\n",
    "\n",
    "ax[0].bar(depths, mean_speedup_MNIST, edgecolor='k', alpha=0.9, yerr=std_speedup_MNIST)\n",
    "ax[1].bar(depths, mean_speedup_FashionMNIST, edgecolor='k', alpha=0.9, yerr=std_speedup_FashionMNIST)\n",
    "ax[2].bar(depths, mean_speedup_SVHN, edgecolor='k', alpha=0.9, yerr=std_speedup_SVHN)\n",
    "\n",
    "ax[0].set_xticklabels(depths, rotation=45)\n",
    "ax[1].set_xticklabels(depths, rotation=45)\n",
    "ax[2].set_xticklabels(depths, rotation=45)\n",
    "\n",
    "ax[0].set_ylim([np.min(mean_speedup_MNIST - std_speedup_MNIST), np.max(mean_speedup_MNIST + std_speedup_MNIST)])\n",
    "ax[1].set_ylim([np.min(mean_speedup_FashionMNIST - std_speedup_FashionMNIST), np.max(mean_speedup_FashionMNIST + std_speedup_FashionMNIST)])\n",
    "ax[2].set_ylim([np.min(mean_speedup_SVHN - std_speedup_SVHN), np.max(mean_speedup_SVHN + std_speedup_SVHN)])\n",
    "\n",
    "ax[0].set_xlim([-0.5, 15.5])\n",
    "ax[1].set_xlim([-0.5, 15.5])\n",
    "ax[2].set_xlim([-0.5, 15.5])\n",
    "\n",
    "ax[0].yaxis.grid(alpha=0.3)\n",
    "ax[0].xaxis.grid(alpha=0.3)\n",
    "ax[1].yaxis.grid(alpha=0.3)\n",
    "ax[1].xaxis.grid(alpha=0.3)\n",
    "ax[2].yaxis.grid(alpha=0.3)\n",
    "ax[2].xaxis.grid(alpha=0.3)\n",
    "\n",
    "ax[0].set_ylabel(\"Speedup (\\%)\")\n",
    "ax[1].set_xlabel(\"D (Max depth)\")\n",
    "\n",
    "plt.savefig(plot_savedir + 'image_speedups.pdf', format='pdf', bbox_inches='tight')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4-final"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}