{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!nvidia-smi\n",
    "#using a GeForce GTX1080 Ti for reproducibility for all timing experiments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "from torch import nn, optim, autograd\n",
    "from torch.nn import functional as F\n",
    "from torch.distributions.multivariate_normal import MultivariateNormal\n",
    "from torch.autograd import Variable\n",
    "import numpy as np\n",
    "from sklearn.utils import shuffle as skshuffle\n",
    "# NOTE(review): wildcard imports pollute the namespace and hide provenance;\n",
    "# kept as-is because later cells may rely on names they bring in (e.g. LB_utils).\n",
    "from math import *\n",
    "from backpack import backpack, extend\n",
    "from backpack.extensions import KFAC, DiagHessian, DiagGGNMC\n",
    "from sklearn.metrics import roc_auc_score\n",
    "import scipy\n",
    "from tqdm import tqdm, trange\n",
    "import pytest\n",
    "from LB_utils import * \n",
    "import time\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Seed every RNG in play (numpy, torch CPU and CUDA) and force deterministic\n",
    "# cuDNN kernels so the timing/accuracy experiments are reproducible.\n",
    "s = 123\n",
    "np.random.seed(s)\n",
    "torch.manual_seed(s)\n",
    "torch.cuda.manual_seed(s)\n",
    "torch.backends.cudnn.deterministic = True\n",
    "torch.backends.cudnn.benchmark = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "device:  cuda\n",
      "cuda status:  True\n"
     ]
    }
   ],
   "source": [
    "# Select GPU when available. NOTE(review): the rest of the notebook calls\n",
    "# .cuda() unconditionally, so it effectively requires a CUDA device.\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "cuda_status = torch.cuda.is_available()\n",
    "print(\"device: \", device)\n",
    "print(\"cuda status: \", cuda_status)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "class LPADirNN(nn.Module):\n",
    "    \"\"\"Small CNN for 28x28 single-channel images.\n",
    "\n",
    "    Two conv+ReLU+maxpool stages (1->32->64 channels, 5x5 kernels) followed\n",
    "    by one linear head; a 28x28 input yields a 4*4*64 feature vector after\n",
    "    the two 2x2 poolings.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, num_classes=10):\n",
    "        super(LPADirNN, self).__init__()\n",
    "        \n",
    "        self.features = torch.nn.Sequential(\n",
    "            torch.nn.Conv2d(1, 32, 5),\n",
    "            torch.nn.ReLU(),\n",
    "            torch.nn.MaxPool2d(2,2),\n",
    "            torch.nn.Conv2d(32, 64, 5),\n",
    "            torch.nn.ReLU(),\n",
    "            torch.nn.MaxPool2d(2,2),\n",
    "            torch.nn.Flatten()     \n",
    "        )\n",
    "        # Kept separate from `features` so the final layer can be treated on\n",
    "        # its own (last-layer Laplace approximation).\n",
    "        self.linear = torch.nn.Linear(4 * 4 * 64, num_classes) \n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.features(x)\n",
    "        out = self.linear(out)\n",
    "        return out\n",
    "\n",
    "def LPADirNN_last_layer(num_classes=10):\n",
    "    \"\"\"Factory returning a fresh LPADirNN with `num_classes` outputs.\"\"\"\n",
    "    return(LPADirNN(num_classes=num_classes))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "BATCH_SIZE_TRAIN_MNIST = 128\n",
    "BATCH_SIZE_TEST_MNIST = 128\n",
    "MAX_ITER_MNIST = 6\n",
    "# NOTE(review): 10e-6 == 1e-5, and this constant is never passed to the Adam\n",
    "# optimizer below (which hardcodes lr=1e-3) -- confirm which rate is intended.\n",
    "LR_TRAIN_MNIST = 10e-6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "MNIST_transform = torchvision.transforms.ToTensor()\n",
    "\n",
    "MNIST_train = torchvision.datasets.MNIST(\n",
    "        '~/data/mnist',\n",
    "        train=True,\n",
    "        download=True,\n",
    "        transform=MNIST_transform)\n",
    "\n",
    "mnist_train_loader = torch.utils.data.dataloader.DataLoader(\n",
    "    MNIST_train,\n",
    "    batch_size=BATCH_SIZE_TRAIN_MNIST,\n",
    "    shuffle=True\n",
    ")\n",
    "\n",
    "\n",
    "# download=False relies on the train-split download above having fetched the\n",
    "# archive; test batches are kept in a fixed order (shuffle=False) so that\n",
    "# per-batch metrics are reproducible.\n",
    "MNIST_test = torchvision.datasets.MNIST(\n",
    "        '~/data/mnist',\n",
    "        train=False,\n",
    "        download=False,\n",
    "        transform=MNIST_transform)\n",
    "\n",
    "mnist_test_loader = torch.utils.data.dataloader.DataLoader(\n",
    "    MNIST_test,\n",
    "    batch_size=BATCH_SIZE_TEST_MNIST,\n",
    "    shuffle=False,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_model = LPADirNN_last_layer().cuda()\n",
    "loss_function = torch.nn.CrossEntropyLoss()\n",
    "\n",
    "# NOTE(review): lr=1e-3 is hardcoded here rather than using LR_TRAIN_MNIST\n",
    "# (10e-6) defined above -- confirm which learning rate is intended.\n",
    "mnist_train_optimizer = torch.optim.Adam(mnist_model.parameters(), lr=1e-3, weight_decay=5e-4)\n",
    "MNIST_PATH = \"pretrained_weights/MNIST_pretrained_10_classes_last_layer_s{}.pth\".format(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training routine\n",
    "\n",
    "def train(model, train_loader, optimizer, max_iter, path, verbose=True):\n",
    "    \"\"\"Train `model` for `max_iter` epochs and save its weights to `path`.\n",
    "\n",
    "    model: nn.Module already on the GPU.\n",
    "    train_loader: DataLoader yielding (x, y) minibatches.\n",
    "    optimizer: torch optimizer over `model.parameters()`.\n",
    "    max_iter: number of passes over `train_loader`.\n",
    "    path: destination file for the trained state_dict.\n",
    "    verbose: print loss/accuracy every 50 batches.\n",
    "\n",
    "    Note: uses the global `loss_function` and `get_accuracy` defined above.\n",
    "    \"\"\"\n",
    "    max_len = len(train_loader)\n",
    "\n",
    "    # `epoch` instead of `iter`: avoids shadowing the builtin iter().\n",
    "    for epoch in range(max_iter):\n",
    "        for batch_idx, (x, y) in enumerate(train_loader):\n",
    "            \n",
    "            x, y = x.cuda(), y.cuda()\n",
    "            \n",
    "            output = model(x)\n",
    "\n",
    "            accuracy = get_accuracy(output, y)\n",
    "\n",
    "            loss = loss_function(output, y)\n",
    "            # Clear stale gradients before backward (equivalent to the old\n",
    "            # backward/step/zero_grad order, but harder to get wrong).\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            if verbose and batch_idx % 50 == 0:\n",
    "                print(\n",
    "                    \"Iteration {}; {}/{} \\t\".format(epoch, batch_idx, max_len) +\n",
    "                    \"Minibatch Loss %.3f  \" % (loss) +\n",
    "                    \"Accuracy %.0f\" % (accuracy * 100) + \"%\"\n",
    "                )\n",
    "\n",
    "    print(\"saving model at: {}\".format(path))\n",
    "    # Bug fix: save the `model` argument, not the global `mnist_model`;\n",
    "    # the original silently saved the wrong network when called with any\n",
    "    # other model.\n",
    "    torch.save(model.state_dict(), path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "#train(mnist_model, mnist_train_loader, mnist_train_optimizer, MAX_ITER_MNIST, MNIST_PATH, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading model from: pretrained_weights/MNIST_pretrained_10_classes_last_layer.pth\n",
      "Batch 0/79 \tAccuracy 100%\n",
      "Batch 10/79 \tAccuracy 97%\n",
      "Batch 20/79 \tAccuracy 98%\n",
      "Batch 30/79 \tAccuracy 100%\n",
      "Batch 40/79 \tAccuracy 100%\n",
      "Batch 50/79 \tAccuracy 100%\n",
      "Batch 60/79 \tAccuracy 100%\n",
      "Batch 70/79 \tAccuracy 98%\n",
      "overall test accuracy on MNIST: 98.90 %\n"
     ]
    }
   ],
   "source": [
    "# Predict in distribution (test accuracy of the pretrained model on MNIST)\n",
    "#MNIST_PATH = \"pretrained_weights/MNIST_pretrained_10_classes_last_layer_s{}.pth\".format(s)\n",
    "MNIST_PATH = \"pretrained_weights/MNIST_pretrained_10_classes_last_layer.pth\"\n",
    "\n",
    "mnist_model = LPADirNN_last_layer().cuda()\n",
    "print(\"loading model from: {}\".format(MNIST_PATH))\n",
    "mnist_model.load_state_dict(torch.load(MNIST_PATH))\n",
    "mnist_model.eval()\n",
    "\n",
    "acc = []\n",
    "\n",
    "max_len = len(mnist_test_loader)\n",
    "# Inference only: disable autograd so no graph is built (same predictions,\n",
    "# less memory).\n",
    "with torch.no_grad():\n",
    "    for batch_idx, (x, y) in enumerate(mnist_test_loader):\n",
    "\n",
    "        x, y = x.cuda(), y.cuda()\n",
    "        output = mnist_model(x)\n",
    "\n",
    "        accuracy = get_accuracy(output, y)\n",
    "        if batch_idx % 10 == 0:\n",
    "            print(\n",
    "                \"Batch {}/{} \\t\".format(batch_idx, max_len) + \n",
    "                \"Accuracy %.0f\" % (accuracy * 100) + \"%\"\n",
    "            )\n",
    "        acc.append(accuracy)\n",
    "\n",
    "avg_acc = np.mean(acc)\n",
    "print('overall test accuracy on MNIST: {:.02f} %'.format(avg_acc * 100))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "BATCH_SIZE_TEST_FMNIST = 128\n",
    "BATCH_SIZE_TEST_KMNIST = 128"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# FashionMNIST as an out-of-distribution test set (same 28x28 grayscale format).\n",
    "FMNIST_test = torchvision.datasets.FashionMNIST(\n",
    "        '~/data/fmnist', train=False, download=True,\n",
    "        transform=MNIST_transform)   #torchvision.transforms.ToTensor())\n",
    "\n",
    "FMNIST_test_loader = torch.utils.data.DataLoader(\n",
    "    FMNIST_test,\n",
    "    batch_size=BATCH_SIZE_TEST_FMNIST, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# KMNIST as a second out-of-distribution test set (same 28x28 grayscale format).\n",
    "KMNIST_test = torchvision.datasets.KMNIST(\n",
    "        '~/data/kmnist', train=False, download=True,\n",
    "        transform=MNIST_transform)\n",
    "\n",
    "KMNIST_test_loader = torch.utils.data.DataLoader(\n",
    "    KMNIST_test,\n",
    "    batch_size=BATCH_SIZE_TEST_KMNIST, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Load notMNIST\"\"\"\n",
    "\n",
    "import os\n",
    "import numpy as np\n",
    "import torch\n",
    "from PIL import Image\n",
    "from torch.utils.data.dataset import Dataset\n",
    "from matplotlib.pyplot import imread\n",
    "from torch import Tensor\n",
    "\n",
    "\"\"\"\n",
    "Loads the train/test set. \n",
    "Every image in the dataset is 28x28 pixels and the labels are numbered from 0-9\n",
    "for A-J respectively.\n",
    "Set root to point to the Train/Test folders.\n",
    "\"\"\"\n",
    "\n",
    "class notMNIST(Dataset):\n",
    "    \"\"\"torch Dataset over the notMNIST image folders (one folder per class A-J).\"\"\"\n",
    "\n",
    "    def __init__(self, root, transform):\n",
    "        \"\"\"Eagerly load every readable image under `root`.\n",
    "\n",
    "        root: directory with one sub-folder per class, named 'A'..'J'.\n",
    "        transform: callable applied to each raw image in __getitem__\n",
    "            (may be None to skip).\n",
    "        \"\"\"\n",
    "        self.transform = transform\n",
    "        \n",
    "        Images, Y = [], []\n",
    "        folders = os.listdir(root)\n",
    "\n",
    "        for folder in folders:\n",
    "            folder_path = os.path.join(root, folder)\n",
    "            for ims in os.listdir(folder_path):\n",
    "                try:\n",
    "                    img_path = os.path.join(folder_path, ims)\n",
    "                    Images.append(np.array(imread(img_path)))\n",
    "                    Y.append(ord(folder) - 65)  # Folders are A-J so labels will be 0-9\n",
    "                # Bug fix: a bare `except:` also swallows KeyboardInterrupt and\n",
    "                # SystemExit; only skip files whose decoding actually failed.\n",
    "                except Exception:\n",
    "                    # Some images in the dataset are damaged\n",
    "                    print(\"File {}/{} is broken\".format(folder, ims))\n",
    "        data = [(x, y) for x, y in zip(Images, Y)]\n",
    "        self.data = data\n",
    "        self.targets = torch.Tensor(Y)\n",
    "\n",
    "    def __len__(self):\n",
    "        \"\"\"Number of successfully loaded images.\"\"\"\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        \"\"\"Return (1x28x28 float tensor, int label) for `index`.\"\"\"\n",
    "        img = self.data[index][0]\n",
    "\n",
    "        if self.transform is not None:\n",
    "            img = self.transform(img)\n",
    "            \n",
    "        # Input for Conv2D should be Channels x Height x Width\n",
    "        img_tensor = Tensor(img).view(1, 28, 28).float()\n",
    "        label = self.data[index][1]\n",
    "        return (img_tensor, label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "File F/Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png is broken\n",
      "File A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png is broken\n"
     ]
    }
   ],
   "source": [
    "#root = os.path.abspath('~/data')\n",
    "# expanduser (not abspath) is required so '~' resolves to the home directory.\n",
    "root = os.path.expanduser('~/data')\n",
    "\n",
    "# Instantiating the notMNIST dataset class we created\n",
    "notMNIST_test = notMNIST(root=os.path.join(root, 'notMNIST_small'),\n",
    "                               transform=MNIST_transform)\n",
    "\n",
    "# Creating a dataloader\n",
    "not_mnist_test_loader = torch.utils.data.dataloader.DataLoader(\n",
    "                            dataset=notMNIST_test,\n",
    "                            batch_size=BATCH_SIZE_TEST_KMNIST,\n",
    "                            shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Prepare Gaussians"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "n: 1024 inputs to linear layer with m: 10 classes\n",
      "Batch: 0/469\n",
      "Batch: 1/469\n",
      "Batch: 2/469\n",
      "Batch: 3/469\n",
      "Batch: 4/469\n",
      "Batch: 5/469\n",
      "Batch: 6/469\n",
      "Batch: 7/469\n",
      "Batch: 8/469\n",
      "Batch: 9/469\n",
      "Batch: 10/469\n",
      "Batch: 11/469\n",
      "Batch: 12/469\n",
      "Batch: 13/469\n",
      "Batch: 14/469\n",
      "Batch: 15/469\n",
      "Batch: 16/469\n",
      "Batch: 17/469\n",
      "Batch: 18/469\n",
      "Batch: 19/469\n",
      "Batch: 20/469\n",
      "Batch: 21/469\n",
      "Batch: 22/469\n",
      "Batch: 23/469\n",
      "Batch: 24/469\n",
      "Batch: 25/469\n",
      "Batch: 26/469\n",
      "Batch: 27/469\n",
      "Batch: 28/469\n",
      "Batch: 29/469\n",
      "Batch: 30/469\n",
      "Batch: 31/469\n",
      "Batch: 32/469\n",
      "Batch: 33/469\n",
      "Batch: 34/469\n",
      "Batch: 35/469\n",
      "Batch: 36/469\n",
      "Batch: 37/469\n",
      "Batch: 38/469\n",
      "Batch: 39/469\n",
      "Batch: 40/469\n",
      "Batch: 41/469\n",
      "Batch: 42/469\n",
      "Batch: 43/469\n",
      "Batch: 44/469\n",
      "Batch: 45/469\n",
      "Batch: 46/469\n",
      "Batch: 47/469\n",
      "Batch: 48/469\n",
      "Batch: 49/469\n",
      "Batch: 50/469\n",
      "Batch: 51/469\n",
      "Batch: 52/469\n",
      "Batch: 53/469\n",
      "Batch: 54/469\n",
      "Batch: 55/469\n",
      "Batch: 56/469\n",
      "Batch: 57/469\n",
      "Batch: 58/469\n",
      "Batch: 59/469\n",
      "Batch: 60/469\n",
      "Batch: 61/469\n",
      "Batch: 62/469\n",
      "Batch: 63/469\n",
      "Batch: 64/469\n",
      "Batch: 65/469\n",
      "Batch: 66/469\n",
      "Batch: 67/469\n",
      "Batch: 68/469\n",
      "Batch: 69/469\n",
      "Batch: 70/469\n",
      "Batch: 71/469\n",
      "Batch: 72/469\n",
      "Batch: 73/469\n",
      "Batch: 74/469\n",
      "Batch: 75/469\n",
      "Batch: 76/469\n",
      "Batch: 77/469\n",
      "Batch: 78/469\n",
      "Batch: 79/469\n",
      "Batch: 80/469\n",
      "Batch: 81/469\n",
      "Batch: 82/469\n",
      "Batch: 83/469\n",
      "Batch: 84/469\n",
      "Batch: 85/469\n",
      "Batch: 86/469\n",
      "Batch: 87/469\n",
      "Batch: 88/469\n",
      "Batch: 89/469\n",
      "Batch: 90/469\n",
      "Batch: 91/469\n",
      "Batch: 92/469\n",
      "Batch: 93/469\n",
      "Batch: 94/469\n",
      "Batch: 95/469\n",
      "Batch: 96/469\n",
      "Batch: 97/469\n",
      "Batch: 98/469\n",
      "Batch: 99/469\n",
      "Batch: 100/469\n",
      "Batch: 101/469\n",
      "Batch: 102/469\n",
      "Batch: 103/469\n",
      "Batch: 104/469\n",
      "Batch: 105/469\n",
      "Batch: 106/469\n",
      "Batch: 107/469\n",
      "Batch: 108/469\n",
      "Batch: 109/469\n",
      "Batch: 110/469\n",
      "Batch: 111/469\n",
      "Batch: 112/469\n",
      "Batch: 113/469\n",
      "Batch: 114/469\n",
      "Batch: 115/469\n",
      "Batch: 116/469\n",
      "Batch: 117/469\n",
      "Batch: 118/469\n",
      "Batch: 119/469\n",
      "Batch: 120/469\n",
      "Batch: 121/469\n",
      "Batch: 122/469\n",
      "Batch: 123/469\n",
      "Batch: 124/469\n",
      "Batch: 125/469\n",
      "Batch: 126/469\n",
      "Batch: 127/469\n",
      "Batch: 128/469\n",
      "Batch: 129/469\n",
      "Batch: 130/469\n",
      "Batch: 131/469\n",
      "Batch: 132/469\n",
      "Batch: 133/469\n",
      "Batch: 134/469\n",
      "Batch: 135/469\n",
      "Batch: 136/469\n",
      "Batch: 137/469\n",
      "Batch: 138/469\n",
      "Batch: 139/469\n",
      "Batch: 140/469\n",
      "Batch: 141/469\n",
      "Batch: 142/469\n",
      "Batch: 143/469\n",
      "Batch: 144/469\n",
      "Batch: 145/469\n",
      "Batch: 146/469\n",
      "Batch: 147/469\n",
      "Batch: 148/469\n",
      "Batch: 149/469\n",
      "Batch: 150/469\n",
      "Batch: 151/469\n",
      "Batch: 152/469\n",
      "Batch: 153/469\n",
      "Batch: 154/469\n",
      "Batch: 155/469\n",
      "Batch: 156/469\n",
      "Batch: 157/469\n",
      "Batch: 158/469\n",
      "Batch: 159/469\n",
      "Batch: 160/469\n",
      "Batch: 161/469\n",
      "Batch: 162/469\n",
      "Batch: 163/469\n",
      "Batch: 164/469\n",
      "Batch: 165/469\n",
      "Batch: 166/469\n",
      "Batch: 167/469\n",
      "Batch: 168/469\n",
      "Batch: 169/469\n",
      "Batch: 170/469\n",
      "Batch: 171/469\n",
      "Batch: 172/469\n",
      "Batch: 173/469\n",
      "Batch: 174/469\n",
      "Batch: 175/469\n",
      "Batch: 176/469\n",
      "Batch: 177/469\n",
      "Batch: 178/469\n",
      "Batch: 179/469\n",
      "Batch: 180/469\n",
      "Batch: 181/469\n",
      "Batch: 182/469\n",
      "Batch: 183/469\n",
      "Batch: 184/469\n",
      "Batch: 185/469\n",
      "Batch: 186/469\n",
      "Batch: 187/469\n",
      "Batch: 188/469\n",
      "Batch: 189/469\n",
      "Batch: 190/469\n",
      "Batch: 191/469\n",
      "Batch: 192/469\n",
      "Batch: 193/469\n",
      "Batch: 194/469\n",
      "Batch: 195/469\n",
      "Batch: 196/469\n",
      "Batch: 197/469\n",
      "Batch: 198/469\n",
      "Batch: 199/469\n",
      "Batch: 200/469\n",
      "Batch: 201/469\n",
      "Batch: 202/469\n",
      "Batch: 203/469\n",
      "Batch: 204/469\n",
      "Batch: 205/469\n",
      "Batch: 206/469\n",
      "Batch: 207/469\n",
      "Batch: 208/469\n",
      "Batch: 209/469\n",
      "Batch: 210/469\n",
      "Batch: 211/469\n",
      "Batch: 212/469\n",
      "Batch: 213/469\n",
      "Batch: 214/469\n",
      "Batch: 215/469\n",
      "Batch: 216/469\n",
      "Batch: 217/469\n",
      "Batch: 218/469\n",
      "Batch: 219/469\n",
      "Batch: 220/469\n",
      "Batch: 221/469\n",
      "Batch: 222/469\n",
      "Batch: 223/469\n",
      "Batch: 224/469\n",
      "Batch: 225/469\n",
      "Batch: 226/469\n",
      "Batch: 227/469\n",
      "Batch: 228/469\n",
      "Batch: 229/469\n",
      "Batch: 230/469\n",
      "Batch: 231/469\n",
      "Batch: 232/469\n",
      "Batch: 233/469\n",
      "Batch: 234/469\n",
      "Batch: 235/469\n",
      "Batch: 236/469\n",
      "Batch: 237/469\n",
      "Batch: 238/469\n",
      "Batch: 239/469\n",
      "Batch: 240/469\n",
      "Batch: 241/469\n",
      "Batch: 242/469\n",
      "Batch: 243/469\n",
      "Batch: 244/469\n",
      "Batch: 245/469\n",
      "Batch: 246/469\n",
      "Batch: 247/469\n",
      "Batch: 248/469\n",
      "Batch: 249/469\n",
      "Batch: 250/469\n",
      "Batch: 251/469\n",
      "Batch: 252/469\n",
      "Batch: 253/469\n",
      "Batch: 254/469\n",
      "Batch: 255/469\n",
      "Batch: 256/469\n",
      "Batch: 257/469\n",
      "Batch: 258/469\n",
      "Batch: 259/469\n",
      "Batch: 260/469\n",
      "Batch: 261/469\n",
      "Batch: 262/469\n",
      "Batch: 263/469\n",
      "Batch: 264/469\n",
      "Batch: 265/469\n",
      "Batch: 266/469\n",
      "Batch: 267/469\n",
      "Batch: 268/469\n",
      "Batch: 269/469\n",
      "Batch: 270/469\n",
      "Batch: 271/469\n",
      "Batch: 272/469\n",
      "Batch: 273/469\n",
      "Batch: 274/469\n",
      "Batch: 275/469\n",
      "Batch: 276/469\n",
      "Batch: 277/469\n",
      "Batch: 278/469\n",
      "Batch: 279/469\n",
      "Batch: 280/469\n",
      "Batch: 281/469\n",
      "Batch: 282/469\n",
      "Batch: 283/469\n",
      "Batch: 284/469\n",
      "Batch: 285/469\n",
      "Batch: 286/469\n",
      "Batch: 287/469\n",
      "Batch: 288/469\n",
      "Batch: 289/469\n",
      "Batch: 290/469\n",
      "Batch: 291/469\n",
      "Batch: 292/469\n",
      "Batch: 293/469\n",
      "Batch: 294/469\n",
      "Batch: 295/469\n",
      "Batch: 296/469\n",
      "Batch: 297/469\n",
      "Batch: 298/469\n",
      "Batch: 299/469\n",
      "Batch: 300/469\n",
      "Batch: 301/469\n",
      "Batch: 302/469\n",
      "Batch: 303/469\n",
      "Batch: 304/469\n",
      "Batch: 305/469\n",
      "Batch: 306/469\n",
      "Batch: 307/469\n",
      "Batch: 308/469\n",
      "Batch: 309/469\n",
      "Batch: 310/469\n",
      "Batch: 311/469\n",
      "Batch: 312/469\n",
      "Batch: 313/469\n",
      "Batch: 314/469\n",
      "Batch: 315/469\n",
      "Batch: 316/469\n",
      "Batch: 317/469\n",
      "Batch: 318/469\n",
      "Batch: 319/469\n",
      "Batch: 320/469\n",
      "Batch: 321/469\n",
      "Batch: 322/469\n",
      "Batch: 323/469\n",
      "Batch: 324/469\n",
      "Batch: 325/469\n",
      "Batch: 326/469\n",
      "Batch: 327/469\n",
      "Batch: 328/469\n",
      "Batch: 329/469\n",
      "Batch: 330/469\n",
      "Batch: 331/469\n",
      "Batch: 332/469\n",
      "Batch: 333/469\n",
      "Batch: 334/469\n",
      "Batch: 335/469\n",
      "Batch: 336/469\n",
      "Batch: 337/469\n",
      "Batch: 338/469\n",
      "Batch: 339/469\n",
      "Batch: 340/469\n",
      "Batch: 341/469\n",
      "Batch: 342/469\n",
      "Batch: 343/469\n",
      "Batch: 344/469\n",
      "Batch: 345/469\n",
      "Batch: 346/469\n",
      "Batch: 347/469\n",
      "Batch: 348/469\n",
      "Batch: 349/469\n",
      "Batch: 350/469\n",
      "Batch: 351/469\n",
      "Batch: 352/469\n",
      "Batch: 353/469\n",
      "Batch: 354/469\n",
      "Batch: 355/469\n",
      "Batch: 356/469\n",
      "Batch: 357/469\n",
      "Batch: 358/469\n",
      "Batch: 359/469\n",
      "Batch: 360/469\n",
      "Batch: 361/469\n",
      "Batch: 362/469\n",
      "Batch: 363/469\n",
      "Batch: 364/469\n",
      "Batch: 365/469\n",
      "Batch: 366/469\n",
      "Batch: 367/469\n",
      "Batch: 368/469\n",
      "Batch: 369/469\n",
      "Batch: 370/469\n",
      "Batch: 371/469\n",
      "Batch: 372/469\n",
      "Batch: 373/469\n",
      "Batch: 374/469\n",
      "Batch: 375/469\n",
      "Batch: 376/469\n",
      "Batch: 377/469\n",
      "Batch: 378/469\n",
      "Batch: 379/469\n",
      "Batch: 380/469\n",
      "Batch: 381/469\n",
      "Batch: 382/469\n",
      "Batch: 383/469\n",
      "Batch: 384/469\n",
      "Batch: 385/469\n",
      "Batch: 386/469\n",
      "Batch: 387/469\n",
      "Batch: 388/469\n",
      "Batch: 389/469\n",
      "Batch: 390/469\n",
      "Batch: 391/469\n",
      "Batch: 392/469\n",
      "Batch: 393/469\n",
      "Batch: 394/469\n",
      "Batch: 395/469\n",
      "Batch: 396/469\n",
      "Batch: 397/469\n",
      "Batch: 398/469\n",
      "Batch: 399/469\n",
      "Batch: 400/469\n",
      "Batch: 401/469\n",
      "Batch: 402/469\n",
      "Batch: 403/469\n",
      "Batch: 404/469\n",
      "Batch: 405/469\n",
      "Batch: 406/469\n",
      "Batch: 407/469\n",
      "Batch: 408/469\n",
      "Batch: 409/469\n",
      "Batch: 410/469\n",
      "Batch: 411/469\n",
      "Batch: 412/469\n",
      "Batch: 413/469\n",
      "Batch: 414/469\n",
      "Batch: 415/469\n",
      "Batch: 416/469\n",
      "Batch: 417/469\n",
      "Batch: 418/469\n",
      "Batch: 419/469\n",
      "Batch: 420/469\n",
      "Batch: 421/469\n",
      "Batch: 422/469\n",
      "Batch: 423/469\n",
      "Batch: 424/469\n",
      "Batch: 425/469\n",
      "Batch: 426/469\n",
      "Batch: 427/469\n",
      "Batch: 428/469\n",
      "Batch: 429/469\n",
      "Batch: 430/469\n",
      "Batch: 431/469\n",
      "Batch: 432/469\n",
      "Batch: 433/469\n",
      "Batch: 434/469\n",
      "Batch: 435/469\n",
      "Batch: 436/469\n",
      "Batch: 437/469\n",
      "Batch: 438/469\n",
      "Batch: 439/469\n",
      "Batch: 440/469\n",
      "Batch: 441/469\n",
      "Batch: 442/469\n",
      "Batch: 443/469\n",
      "Batch: 444/469\n",
      "Batch: 445/469\n",
      "Batch: 446/469\n",
      "Batch: 447/469\n",
      "Batch: 448/469\n",
      "Batch: 449/469\n",
      "Batch: 450/469\n",
      "Batch: 451/469\n",
      "Batch: 452/469\n",
      "Batch: 453/469\n",
      "Batch: 454/469\n",
      "Batch: 455/469\n",
      "Batch: 456/469\n",
      "Batch: 457/469\n",
      "Batch: 458/469\n",
      "Batch: 459/469\n",
      "Batch: 460/469\n",
      "Batch: 461/469\n",
      "Batch: 462/469\n",
      "Batch: 463/469\n",
      "Batch: 464/469\n",
      "Batch: 465/469\n",
      "Batch: 466/469\n",
      "Batch: 467/469\n",
      "Batch: 468/469\n",
      "469\n",
      "M_W_post size:  torch.Size([1024, 10])\n",
      "M_b_post size:  torch.Size([10])\n",
      "C_W_post size:  torch.Size([10, 1024])\n",
      "C_b_post size:  torch.Size([10])\n",
      "preparing Gaussians took 4.822777509689331 seconds which is 0.08037962516148885 minutes\n"
     ]
    }
   ],
   "source": [
    "# Fit the diagonal second-order (Laplace/Gaussian) posterior over the last\n",
    "# layer and report wall-clock time. Diag_second_order comes from LB_utils;\n",
    "# presumably var0 is the prior variance -- confirm against LB_utils.\n",
    "t0 = time.time()\n",
    "M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D = Diag_second_order(model=mnist_model,\n",
    "                                                                   train_loader=mnist_train_loader,\n",
    "                                                                   var0 = 1e-3,\n",
    "                                                                   device=device)\n",
    "t1 = time.time()\n",
    "time_gaussian = t1-t0\n",
    "print(\"preparing Gaussians took {} seconds which is {} minutes\".format(time_gaussian, time_gaussian/60))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Batch: 0/469\n",
      "Batch: 1/469\n",
      "Batch: 2/469\n",
      "Batch: 3/469\n",
      "Batch: 4/469\n",
      "Batch: 5/469\n",
      "Batch: 6/469\n",
      "Batch: 7/469\n",
      "Batch: 8/469\n",
      "Batch: 9/469\n",
      "Batch: 10/469\n",
      "Batch: 11/469\n",
      "Batch: 12/469\n",
      "Batch: 13/469\n",
      "Batch: 14/469\n",
      "Batch: 15/469\n",
      "Batch: 16/469\n",
      "Batch: 17/469\n",
      "Batch: 18/469\n",
      "Batch: 19/469\n",
      "Batch: 20/469\n",
      "Batch: 21/469\n",
      "Batch: 22/469\n",
      "Batch: 23/469\n",
      "Batch: 24/469\n",
      "Batch: 25/469\n",
      "Batch: 26/469\n",
      "Batch: 27/469\n",
      "Batch: 28/469\n",
      "Batch: 29/469\n",
      "Batch: 30/469\n",
      "Batch: 31/469\n",
      "Batch: 32/469\n",
      "Batch: 33/469\n",
      "Batch: 34/469\n",
      "Batch: 35/469\n",
      "Batch: 36/469\n",
      "Batch: 37/469\n",
      "Batch: 38/469\n",
      "Batch: 39/469\n",
      "Batch: 40/469\n",
      "Batch: 41/469\n",
      "Batch: 42/469\n",
      "Batch: 43/469\n",
      "Batch: 44/469\n",
      "Batch: 45/469\n",
      "Batch: 46/469\n",
      "Batch: 47/469\n",
      "Batch: 48/469\n",
      "Batch: 49/469\n",
      "Batch: 50/469\n",
      "Batch: 51/469\n",
      "Batch: 52/469\n",
      "Batch: 53/469\n",
      "Batch: 54/469\n",
      "Batch: 55/469\n",
      "Batch: 56/469\n",
      "Batch: 57/469\n",
      "Batch: 58/469\n",
      "Batch: 59/469\n",
      "Batch: 60/469\n",
      "Batch: 61/469\n",
      "Batch: 62/469\n",
      "Batch: 63/469\n",
      "Batch: 64/469\n",
      "Batch: 65/469\n",
      "Batch: 66/469\n",
      "Batch: 67/469\n",
      "Batch: 68/469\n",
      "Batch: 69/469\n",
      "Batch: 70/469\n",
      "Batch: 71/469\n",
      "Batch: 72/469\n",
      "Batch: 73/469\n",
      "Batch: 74/469\n",
      "Batch: 75/469\n",
      "Batch: 76/469\n",
      "Batch: 77/469\n",
      "Batch: 78/469\n",
      "Batch: 79/469\n",
      "Batch: 80/469\n",
      "Batch: 81/469\n",
      "Batch: 82/469\n",
      "Batch: 83/469\n",
      "Batch: 84/469\n",
      "Batch: 85/469\n",
      "Batch: 86/469\n",
      "Batch: 87/469\n",
      "Batch: 88/469\n",
      "Batch: 89/469\n",
      "Batch: 90/469\n",
      "Batch: 91/469\n",
      "Batch: 92/469\n",
      "Batch: 93/469\n",
      "Batch: 94/469\n",
      "Batch: 95/469\n",
      "Batch: 96/469\n",
      "Batch: 97/469\n",
      "Batch: 98/469\n",
      "Batch: 99/469\n",
      "Batch: 100/469\n",
      "Batch: 101/469\n",
      "Batch: 102/469\n",
      "Batch: 103/469\n",
      "Batch: 104/469\n",
      "Batch: 105/469\n",
      "Batch: 106/469\n",
      "Batch: 107/469\n",
      "Batch: 108/469\n",
      "Batch: 109/469\n",
      "Batch: 110/469\n",
      "Batch: 111/469\n",
      "Batch: 112/469\n",
      "Batch: 113/469\n",
      "Batch: 114/469\n",
      "Batch: 115/469\n",
      "Batch: 116/469\n",
      "Batch: 117/469\n",
      "Batch: 118/469\n",
      "Batch: 119/469\n",
      "Batch: 120/469\n",
      "Batch: 121/469\n",
      "Batch: 122/469\n",
      "Batch: 123/469\n",
      "Batch: 124/469\n",
      "Batch: 125/469\n",
      "Batch: 126/469\n",
      "Batch: 127/469\n",
      "Batch: 128/469\n",
      "Batch: 129/469\n",
      "Batch: 130/469\n",
      "Batch: 131/469\n",
      "Batch: 132/469\n",
      "Batch: 133/469\n",
      "Batch: 134/469\n",
      "Batch: 135/469\n",
      "Batch: 136/469\n",
      "Batch: 137/469\n",
      "Batch: 138/469\n",
      "Batch: 139/469\n",
      "Batch: 140/469\n",
      "Batch: 141/469\n",
      "Batch: 142/469\n",
      "Batch: 143/469\n",
      "Batch: 144/469\n",
      "Batch: 145/469\n",
      "Batch: 146/469\n",
      "Batch: 147/469\n",
      "Batch: 148/469\n",
      "Batch: 149/469\n",
      "Batch: 150/469\n",
      "Batch: 151/469\n",
      "Batch: 152/469\n",
      "Batch: 153/469\n",
      "Batch: 154/469\n",
      "Batch: 155/469\n",
      "Batch: 156/469\n",
      "Batch: 157/469\n",
      "Batch: 158/469\n",
      "Batch: 159/469\n",
      "Batch: 160/469\n",
      "Batch: 161/469\n",
      "Batch: 162/469\n",
      "Batch: 163/469\n",
      "Batch: 164/469\n",
      "Batch: 165/469 ... Batch: 468/469 (repetitive per-batch progress lines truncated for readability)\n",
      "M_W_post size:  torch.Size([1024, 10])\n",
      "M_b_post size:  torch.Size([10])\n",
      "U_post size:  torch.Size([10, 10])\n",
      "V_post size:  torch.Size([1024, 1024])\n",
      "B_post size:  torch.Size([10, 10])\n"
     ]
    }
   ],
   "source": [
    "M_W_post_K, M_b_post_K, U_post_K, V_post_K, B_post_K = KFLP_second_order(model=mnist_model,\n",
    "                                                               train_loader=mnist_train_loader,\n",
    "                                                               var0 = 5e-2,\n",
    "                                                               device=device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MAP estimate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "targets = MNIST_test.targets.numpy()\n",
    "targets_FMNIST = FMNIST_test.targets.numpy()\n",
    "targets_notMNIST = notMNIST_test.targets.numpy().astype(int)\n",
    "targets_KMNIST = KMNIST_test.targets.numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_MAP = predict_MAP(mnist_model, mnist_test_loader, cuda=True).cpu().numpy()\n",
    "mnist_test_out_fmnist_MAP = predict_MAP(mnist_model, FMNIST_test_loader, cuda=True).cpu().numpy()\n",
    "mnist_test_out_notMNIST_MAP = predict_MAP(mnist_model, not_mnist_test_loader, cuda=True).cpu().numpy()\n",
    "mnist_test_out_KMNIST_MAP = predict_MAP(mnist_model, KMNIST_test_loader, cuda=True).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_MAP, prob_correct_in_MAP, ent_in_MAP, MMC_in_MAP = get_in_dist_values(mnist_test_in_MAP, targets)\n",
    "acc_out_FMNIST_MAP, prob_correct_out_FMNIST_MAP, ent_out_FMNIST_MAP, MMC_out_FMNIST_MAP, auroc_out_FMNIST_MAP = get_out_dist_values(mnist_test_in_MAP, mnist_test_out_fmnist_MAP, targets_FMNIST)\n",
    "acc_out_notMNIST_MAP, prob_correct_out_notMNIST_MAP, ent_out_notMNIST_MAP, MMC_out_notMNIST_MAP, auroc_out_notMNIST_MAP = get_out_dist_values(mnist_test_in_MAP, mnist_test_out_notMNIST_MAP, targets_notMNIST)\n",
    "acc_out_KMNIST_MAP, prob_correct_out_KMNIST_MAP, ent_out_KMNIST_MAP, MMC_out_KMNIST_MAP, auroc_out_KMNIST_MAP = get_out_dist_values(mnist_test_in_MAP, mnist_test_out_KMNIST_MAP, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[In, MAP, mnist] Accuracy: 0.989; average entropy: 0.044;     MMC: 0.986; Prob @ correct: 0.100\n",
      "[Out-MAP, KFAC, FMNIST] Accuracy: 0.063; Average entropy: 1.101;    MMC: 0.604; AUROC: 0.984; Prob @ correct: 0.100\n",
      "[Out-MAP, KFAC, notMNIST] Accuracy: 0.134; Average entropy: 0.609;    MMC: 0.777; AUROC: 0.914; Prob @ correct: 0.100\n",
      "[Out-MAP, KFAC, KMNIST] Accuracy: 0.093; Average entropy: 0.725;    MMC: 0.732; AUROC: 0.962; Prob @ correct: 0.100\n"
     ]
    }
   ],
   "source": [
    "print_in_dist_values(acc_in_MAP, prob_correct_in_MAP, ent_in_MAP, MMC_in_MAP, 'mnist', 'MAP')\n",
    "print_out_dist_values(acc_out_FMNIST_MAP, prob_correct_out_FMNIST_MAP, ent_out_FMNIST_MAP, MMC_out_FMNIST_MAP, auroc_out_FMNIST_MAP, 'FMNIST', 'MAP')\n",
    "print_out_dist_values(acc_out_notMNIST_MAP, prob_correct_out_notMNIST_MAP, ent_out_notMNIST_MAP, MMC_out_notMNIST_MAP, auroc_out_notMNIST_MAP, 'notMNIST', 'MAP')\n",
    "print_out_dist_values(acc_out_KMNIST_MAP, prob_correct_out_KMNIST_MAP, ent_out_KMNIST_MAP, MMC_out_KMNIST_MAP, auroc_out_KMNIST_MAP, 'KMNIST', 'MAP')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "accuracy: 0.989 with std 0.001\n",
      "MMC in: 0.988 with std 0.001\n",
      "MMC out fmnist: 0.584 with std 0.022\n",
      "MMC out notmnist: 0.773 with std 0.012\n",
      "MMC out kmnist: 0.723 with std 0.005\n",
      "AUROC out fmnist: 0.987 with std 0.002\n",
      "AUROC out notmnist: 0.930 with std 0.011\n",
      "AUROC out kmnist: 0.967 with std 0.003\n"
     ]
    }
   ],
   "source": [
    "#MAP estimate\n",
    "#seeds are 123,124,125,126,127\n",
    "acc_in = [0.989, 0.989, 0.991, 0.988, 0.989]\n",
    "mmc_in = [0.988, 0.986, 0.988, 0.988, 0.988]\n",
    "mmc_out_fmnist = [0.591, 0.604, 0.543, 0.600, 0.581]\n",
    "mmc_out_notmnist = [0.750, 0.777, 0.775, 0.782, 0.783]\n",
    "mmc_out_kmnist = [0.717, 0.732, 0.721, 0.725, 0.718]\n",
    "\n",
    "auroc_out_fmnist = [0.988, 0.984, 0.991, 0.986, 0.988]\n",
    "auroc_out_notmnist = [0.947, 0.914, 0.937, 0.931, 0.923]\n",
    "auroc_out_kmnist = [0.969, 0.962, 0.968, 0.968, 0.968]\n",
    "\n",
    "print(\"accuracy: {:.03f} with std {:.03f}\".format(np.mean(acc_in), np.std(acc_in)))\n",
    "\n",
    "print(\"MMC in: {:.03f} with std {:.03f}\".format(np.mean(mmc_in), np.std(mmc_in)))\n",
    "print(\"MMC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_fmnist), np.std(mmc_out_fmnist)))\n",
    "print(\"MMC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_notmnist), np.std(mmc_out_notmnist)))\n",
    "print(\"MMC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_kmnist), np.std(mmc_out_kmnist)))\n",
    "\n",
    "print(\"AUROC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_fmnist), np.std(auroc_out_fmnist)))\n",
    "print(\"AUROC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_notmnist), np.std(auroc_out_notmnist)))\n",
    "print(\"AUROC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_kmnist), np.std(auroc_out_kmnist)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_samples = 1000"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Diag Hessian Sampling estimate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "time used for sampling with 1000 samples: 6.542214870452881\n",
      "time used for sampling with 1000 samples: 7.690843105316162\n",
      "time used for sampling with 1000 samples: 34.59974789619446\n",
      "time used for sampling with 1000 samples: 13.62425947189331\n"
     ]
    }
   ],
   "source": [
    "mnist_test_in_D = predict_diagonal_sampling(mnist_model, mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()\n",
    "mnist_test_out_FMNIST_D = predict_diagonal_sampling(mnist_model, FMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()\n",
    "mnist_test_out_notMNIST_D = predict_diagonal_sampling(mnist_model, not_mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()\n",
    "mnist_test_out_KMNIST_D = predict_diagonal_sampling(mnist_model, KMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_D, prob_correct_in_D, ent_in_D, MMC_in_D = get_in_dist_values(mnist_test_in_D, targets)\n",
    "acc_out_FMNIST_D, prob_correct_out_FMNIST_D, ent_out_FMNIST_D, MMC_out_FMNIST_D, auroc_out_FMNIST_D = get_out_dist_values(mnist_test_in_D, mnist_test_out_FMNIST_D, targets_FMNIST)\n",
    "acc_out_notMNIST_D, prob_correct_out_notMNIST_D, ent_out_notMNIST_D, MMC_out_notMNIST_D, auroc_out_notMNIST_D = get_out_dist_values(mnist_test_in_D, mnist_test_out_notMNIST_D, targets_notMNIST)\n",
    "acc_out_KMNIST_D, prob_correct_out_KMNIST_D, ent_out_KMNIST_D, MMC_out_KMNIST_D, auroc_out_KMNIST_D = get_out_dist_values(mnist_test_in_D, mnist_test_out_KMNIST_D, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[In, Diag, MNIST] Accuracy: 0.989; average entropy: 0.080;     MMC: 0.976; Prob @ correct: 0.100\n",
      "[Out-fmnist, Diag, MNIST] Accuracy: 0.064; Average entropy: 1.187;    MMC: 0.570; AUROC: 0.980; Prob @ correct: 0.100\n",
      "[Out-notMNIST, Diag, MNIST] Accuracy: 0.134; Average entropy: 0.731;    MMC: 0.730; AUROC: 0.907; Prob @ correct: 0.100\n",
      "[Out-KMNIST, Diag, MNIST] Accuracy: 0.093; Average entropy: 0.886;    MMC: 0.666; AUROC: 0.960; Prob @ correct: 0.100\n"
     ]
    }
   ],
   "source": [
    "print_in_dist_values(acc_in_D, prob_correct_in_D, ent_in_D, MMC_in_D, 'MNIST', 'Diag')\n",
    "print_out_dist_values(acc_out_FMNIST_D, prob_correct_out_FMNIST_D, ent_out_FMNIST_D, MMC_out_FMNIST_D, auroc_out_FMNIST_D, 'MNIST', test='fmnist', method='Diag')\n",
    "print_out_dist_values(acc_out_notMNIST_D, prob_correct_out_notMNIST_D, ent_out_notMNIST_D, MMC_out_notMNIST_D, auroc_out_notMNIST_D, 'MNIST', test='notMNIST', method='Diag')\n",
    "print_out_dist_values(acc_out_KMNIST_D, prob_correct_out_KMNIST_D, ent_out_KMNIST_D, MMC_out_KMNIST_D, auroc_out_KMNIST_D, 'MNIST', test='KMNIST', method='Diag')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Diagonal Sampling time in: 7.197 with std 0.129\n",
      "Diagonal Sampling time out fmnist: 7.535 with std 0.324\n",
      "Diagonal Sampling time out notmnist: 18.622 with std 1.284\n",
      "Diagonal Sampling time out kmnist: 9.316 with std 0.193\n",
      "accuracy: 0.989 with std 0.001\n",
      "MMC in: 0.979 with std 0.001\n",
      "MMC out fmnist: 0.551 with std 0.021\n",
      "MMC out notmnist: 0.722 with std 0.014\n",
      "MMC out kmnist: 0.658 with std 0.006\n",
      "AUROC out fmnist: 0.984 with std 0.003\n",
      "AUROC out notmnist: 0.927 with std 0.013\n",
      "AUROC out kmnist: 0.965 with std 0.003\n"
     ]
    }
   ],
   "source": [
    "#Diag Sampling (1000)\n",
    "#seeds are 123,124,125,126,127\n",
    "time_diag_in = [7.236505508422852, 7.125160217285156, 7.238185882568359, 7.385348320007324, 7.000167608261108]\n",
    "time_diag_out_fmnist = [7.219660520553589, 8.032358407974243, 7.777910232543945, 7.446442604064941, 7.200972318649292]\n",
    "time_diag_out_notmnist = [20.68891930580139, 19.327053546905518, 18.424328804016113, 17.584392786026, 17.085043907165527]\n",
    "time_diag_out_kmnist = [9.627353191375732, 9.133541345596313, 9.307446718215942, 9.410037755966187, 9.100234746932983]\n",
    "\n",
    "acc_in = [0.989, 0.989, 0.990, 0.988, 0.989]\n",
    "mmc_in = [0.980, 0.976, 0.979, 0.980, 0.979]\n",
    "mmc_out_fmnist = [0.558, 0.570, 0.511, 0.566, 0.550]\n",
    "mmc_out_notmnist = [0.695, 0.730, 0.721, 0.735, 0.731]\n",
    "mmc_out_kmnist = [0.653, 0.666, 0.654, 0.664, 0.654]\n",
    "\n",
    "auroc_out_fmnist = [0.985, 0.980, 0.988, 0.983, 0.985]\n",
    "auroc_out_notmnist = [0.944, 0.907, 0.935, 0.927, 0.921]\n",
    "auroc_out_kmnist = [0.968, 0.960, 0.967, 0.966, 0.966]\n",
    "\n",
    "print(\"Diagonal Sampling time in: {:.03f} with std {:.03f}\".format(np.mean(time_diag_in), np.std(time_diag_in)))\n",
    "print(\"Diagonal Sampling time out fmnist: {:.03f} with std {:.03f}\".format(np.mean(time_diag_out_fmnist), np.std(time_diag_out_fmnist)))\n",
    "print(\"Diagonal Sampling time out notmnist: {:.03f} with std {:.03f}\".format(np.mean(time_diag_out_notmnist), np.std(time_diag_out_notmnist)))\n",
    "print(\"Diagonal Sampling time out kmnist: {:.03f} with std {:.03f}\".format(np.mean(time_diag_out_kmnist), np.std(time_diag_out_kmnist)))\n",
    "\n",
    "print(\"accuracy: {:.03f} with std {:.03f}\".format(np.mean(acc_in), np.std(acc_in)))\n",
    "\n",
    "print(\"MMC in: {:.03f} with std {:.03f}\".format(np.mean(mmc_in), np.std(mmc_in)))\n",
    "print(\"MMC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_fmnist), np.std(mmc_out_fmnist)))\n",
    "print(\"MMC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_notmnist), np.std(mmc_out_notmnist)))\n",
    "print(\"MMC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_kmnist), np.std(mmc_out_kmnist)))\n",
    "\n",
    "print(\"AUROC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_fmnist), np.std(auroc_out_fmnist)))\n",
    "print(\"AUROC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_notmnist), np.std(auroc_out_notmnist)))\n",
    "print(\"AUROC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_kmnist), np.std(auroc_out_kmnist)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# KFAC Laplace Approximation (sampling)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE: KFLP_second_order returns U_post_K with shape [10, 10] and V_post_K with shape [1024, 1024];\n",
    "# predict_KFAC_sampling computes phi @ <5th arg> @ phi.t() with phi of shape [batch, 1024], so the\n",
    "# feature-side factor (V_post_K, [1024, 1024]) must be passed first, followed by U_post_K ([10, 10]).\n",
    "mnist_test_in_KFAC = predict_KFAC_sampling(mnist_model, mnist_test_loader, M_W_post_K, M_b_post_K, V_post_K, U_post_K, B_post_K, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()\n",
    "mnist_test_out_FMNIST_KFAC = predict_KFAC_sampling(mnist_model, FMNIST_test_loader, M_W_post_K, M_b_post_K, V_post_K, U_post_K, B_post_K, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()\n",
    "mnist_test_out_notMNIST_KFAC = predict_KFAC_sampling(mnist_model, not_mnist_test_loader, M_W_post_K, M_b_post_K, V_post_K, U_post_K, B_post_K, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()\n",
    "mnist_test_out_KMNIST_KFAC = predict_KFAC_sampling(mnist_model, KMNIST_test_loader, M_W_post_K, M_b_post_K, V_post_K, U_post_K, B_post_K, verbose=False, cuda=True, timing=True, n_samples=num_samples).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_KFAC, prob_correct_in_KFAC, ent_in_KFAC, MMC_in_KFAC = get_in_dist_values(mnist_test_in_KFAC, targets)\n",
    "acc_out_FMNIST_KFAC, prob_correct_out_FMNIST_KFAC, ent_out_FMNIST_KFAC, MMC_out_FMNIST_KFAC, auroc_out_FMNIST_KFAC = get_out_dist_values(mnist_test_in_KFAC, mnist_test_out_FMNIST_KFAC, targets_FMNIST)\n",
    "acc_out_notMNIST_KFAC, prob_correct_out_notMNIST_KFAC, ent_out_notMNIST_KFAC, MMC_out_notMNIST_KFAC, auroc_out_notMNIST_KFAC = get_out_dist_values(mnist_test_in_KFAC, mnist_test_out_notMNIST_KFAC, targets_notMNIST)\n",
    "acc_out_KMNIST_KFAC, prob_correct_out_KMNIST_KFAC, ent_out_KMNIST_KFAC, MMC_out_KMNIST_KFAC, auroc_out_KMNIST_KFAC = get_out_dist_values(mnist_test_in_KFAC, mnist_test_out_KMNIST_KFAC, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print_in_dist_values(acc_in_KFAC, prob_correct_in_KFAC, ent_in_KFAC, MMC_in_KFAC, 'MNIST', 'KFAC')\n",
    "print_out_dist_values(acc_out_FMNIST_KFAC, prob_correct_out_FMNIST_KFAC, ent_out_FMNIST_KFAC, MMC_out_FMNIST_KFAC, auroc_out_FMNIST_KFAC, 'MNIST', test='fmnist', method='KFAC')\n",
    "print_out_dist_values(acc_out_notMNIST_KFAC, prob_correct_out_notMNIST_KFAC, ent_out_notMNIST_KFAC, MMC_out_notMNIST_KFAC, auroc_out_notMNIST_KFAC, 'MNIST', test='notMNIST', method='KFAC')\n",
    "print_out_dist_values(acc_out_KMNIST_KFAC, prob_correct_out_KMNIST_KFAC, ent_out_KMNIST_KFAC, MMC_out_KMNIST_KFAC, auroc_out_KMNIST_KFAC, 'MNIST', test='KMNIST', method='KFAC')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Laplace Bridge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_LB = predict_LB(mnist_model, mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_FMNIST_LB = predict_LB(mnist_model, FMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_notMNIST_LB = predict_LB(mnist_model, not_mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_KMNIST_LB = predict_LB(mnist_model, KMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_LBn = mnist_test_in_LB/mnist_test_in_LB.sum(1).reshape(-1,1)\n",
    "mnist_test_out_FMNIST_LBn = mnist_test_out_FMNIST_LB/mnist_test_out_FMNIST_LB.sum(1).reshape(-1,1)\n",
    "mnist_test_out_notMNIST_LBn = mnist_test_out_notMNIST_LB/mnist_test_out_notMNIST_LB.sum(1).reshape(-1,1)\n",
    "mnist_test_out_KMNIST_LBn = mnist_test_out_KMNIST_LB/mnist_test_out_KMNIST_LB.sum(1).reshape(-1,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_LBn, prob_correct_in_LBn, ent_in_LBn, MMC_in_LBn = get_in_dist_values(mnist_test_in_LBn, targets)\n",
    "acc_out_FMNIST_LBn, prob_correct_out_FMNIST_LBn, ent_out_FMNIST_LBn, MMC_out_FMNIST_LBn, auroc_out_FMNIST_LBn = get_out_dist_values(mnist_test_in_LBn, mnist_test_out_FMNIST_LBn, targets_FMNIST)\n",
    "acc_out_notMNIST_LBn, prob_correct_out_notMNIST_LBn, ent_out_notMNIST_LBn, MMC_out_notMNIST_LBn, auroc_out_notMNIST_LBn = get_out_dist_values(mnist_test_in_LBn, mnist_test_out_notMNIST_LBn, targets_notMNIST)\n",
    "acc_out_KMNIST_LBn, prob_correct_out_KMNIST_LBn, ent_out_KMNIST_LBn, MMC_out_KMNIST_LBn, auroc_out_KMNIST_LBn = get_out_dist_values(mnist_test_in_LBn, mnist_test_out_KMNIST_LBn, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print_in_dist_values(acc_in_LBn, prob_correct_in_LBn, ent_in_LBn, MMC_in_LBn, 'MNIST', 'mnist', 'LBn')\n",
    "print_out_dist_values(acc_out_FMNIST_LBn, prob_correct_out_FMNIST_LBn, ent_out_FMNIST_LBn, MMC_out_FMNIST_LBn, auroc_out_FMNIST_LBn, 'MNIST', test='fmnist', method='LBn')\n",
    "print_out_dist_values(acc_out_notMNIST_LBn, prob_correct_out_notMNIST_LBn, ent_out_notMNIST_LBn, MMC_out_notMNIST_LBn, auroc_out_notMNIST_LBn, 'MNIST', test='notMNIST', method='LBn')\n",
    "print_out_dist_values(acc_out_KMNIST_LBn, prob_correct_out_KMNIST_LBn, ent_out_KMNIST_LBn, MMC_out_KMNIST_LBn, auroc_out_KMNIST_LBn, 'MNIST', test='KMNIST', method='LBn')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Laplace Bridge\n",
    "#seeds are 123,124,125,126,127\n",
    "time_lpb_in = [0.01302, 0.01731, 0.01489, 0.01529, 0.01574]\n",
    "time_lpb_out_fmnist = [0.01256, 0.01531, 0.01669, 0.01703, 0.01500]\n",
    "time_lpb_out_notmnist = [0.02332, 0.03115, 0.02666, 0.02756, 0.02864]\n",
    "time_lpb_out_kmnist = [0.01287, 0.01520, 0.01636, 0.01687, 0.01438]\n",
    "\n",
    "acc_in = [0.989, 0.989, 0.991, 0.988, 0.989]\n",
    "mmc_in = [0.988, 0.986, 0.988, 0.988, 0.988]\n",
    "mmc_out_fmnist = [0.493, 0.523, 0.433, 0.514, 0.488]\n",
    "mmc_out_notmnist = [0.735, 0.759, 0.756, 0.767, 0.769]\n",
    "mmc_out_kmnist = [0.699, 0.722, 0.707, 0.713, 0.703]\n",
    "\n",
    "auroc_out_fmnist = [0.991, 0.988, 0.993, 0.989, 0.990]\n",
    "auroc_out_notmnist = [0.948, 0.915, 0.938, 0.932, 0.924]\n",
    "auroc_out_kmnist = [0.970, 0.963, 0.969, 0.968, 0.968]\n",
    "\n",
    "print(\"Laplace Bridge time in: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_in), np.std(time_lpb_in)))\n",
    "print(\"Laplace Bridge time out fmnist: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_out_fmnist), np.std(time_lpb_out_fmnist)))\n",
    "print(\"Laplace Bridge time out notmnist: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_out_notmnist), np.std(time_lpb_out_notmnist)))\n",
    "print(\"Laplace Bridge time out kmnist: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_out_kmnist), np.std(time_lpb_out_kmnist)))\n",
    "\n",
    "print(\"accuracy: {:.03f} with std {:.03f}\".format(np.mean(acc_in), np.std(acc_in)))\n",
    "\n",
    "print(\"MMC in: {:.03f} with std {:.03f}\".format(np.mean(mmc_in), np.std(mmc_in)))\n",
    "print(\"MMC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_fmnist), np.std(mmc_out_fmnist)))\n",
    "print(\"MMC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_notmnist), np.std(mmc_out_notmnist)))\n",
    "print(\"MMC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_kmnist), np.std(mmc_out_kmnist)))\n",
    "\n",
    "print(\"AUROC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_fmnist), np.std(auroc_out_fmnist)))\n",
    "print(\"AUROC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_notmnist), np.std(auroc_out_notmnist)))\n",
    "print(\"AUROC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_kmnist), np.std(auroc_out_kmnist)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# KFAC Laplace Bridge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_LB_KFAC = predict_LB_KFAC(mnist_model, mnist_test_loader, M_W_post_K, M_b_post_K, U_post_K, V_post_K, B_post_K, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_FMNIST_LB_KFAC = predict_LB_KFAC(mnist_model, FMNIST_test_loader, M_W_post_K, M_b_post_K, U_post_K, V_post_K, B_post_K, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_notMNIST_LB_KFAC = predict_LB_KFAC(mnist_model, not_mnist_test_loader, M_W_post_K, M_b_post_K, U_post_K, V_post_K, B_post_K, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_KMNIST_LB_KFAC = predict_LB_KFAC(mnist_model, KMNIST_test_loader, M_W_post_K, M_b_post_K, U_post_K, V_post_K, B_post_K, verbose=False, cuda=True, timing=True).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_LB_KFACn = mnist_test_in_LB_KFAC/mnist_test_in_LB_KFAC.sum(1).reshape(-1,1)\n",
    "mnist_test_out_FMNIST_LB_KFACn = mnist_test_out_FMNIST_LB_KFAC/mnist_test_out_FMNIST_LB_KFAC.sum(1).reshape(-1,1)\n",
    "mnist_test_out_notMNIST_LB_KFACn = mnist_test_out_notMNIST_LB_KFAC/mnist_test_out_notMNIST_LB_KFAC.sum(1).reshape(-1,1)\n",
    "mnist_test_out_KMNIST_LB_KFACn = mnist_test_out_KMNIST_LB_KFAC/mnist_test_out_KMNIST_LB_KFAC.sum(1).reshape(-1,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_LB_KFACn, prob_correct_in_LB_KFACn, ent_in_LB_KFACn, MMC_in_LB_KFACn = get_in_dist_values(mnist_test_in_LB_KFACn, targets)\n",
    "acc_out_FMNIST_LB_KFACn, prob_correct_out_FMNIST_LB_KFACn, ent_out_FMNIST_LB_KFACn, MMC_out_FMNIST_LB_KFACn, auroc_out_FMNIST_LB_KFACn = get_out_dist_values(mnist_test_in_LB_KFACn, mnist_test_out_FMNIST_LB_KFACn, targets_FMNIST)\n",
    "acc_out_notMNIST_LB_KFACn, prob_correct_out_notMNIST_LB_KFACn, ent_out_notMNIST_LB_KFACn, MMC_out_notMNIST_LB_KFACn, auroc_out_notMNIST_LB_KFACn = get_out_dist_values(mnist_test_in_LB_KFACn, mnist_test_out_notMNIST_LB_KFACn, targets_notMNIST)\n",
    "acc_out_KMNIST_LB_KFACn, prob_correct_out_KMNIST_LB_KFACn, ent_out_KMNIST_LB_KFACn, MMC_out_KMNIST_LB_KFACn, auroc_out_KMNIST_LB_KFACn = get_out_dist_values(mnist_test_in_LB_KFACn, mnist_test_out_KMNIST_LB_KFACn, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print_in_dist_values(acc_in_LB_KFACn, prob_correct_in_LB_KFACn, ent_in_LB_KFACn, MMC_in_LB_KFACn, 'MNIST', 'mnist', 'LB_KFACn')\n",
    "print_out_dist_values(acc_out_FMNIST_LB_KFACn, prob_correct_out_FMNIST_LB_KFACn, ent_out_FMNIST_LB_KFACn, MMC_out_FMNIST_LB_KFACn, auroc_out_FMNIST_LB_KFACn, 'MNIST', test='fmnist', method='LB_KFACn')\n",
    "print_out_dist_values(acc_out_notMNIST_LB_KFACn, prob_correct_out_notMNIST_LB_KFACn, ent_out_notMNIST_LB_KFACn, MMC_out_notMNIST_LB_KFACn, auroc_out_notMNIST_LB_KFACn, 'MNIST', test='notMNIST', method='LB_KFACn')\n",
    "print_out_dist_values(acc_out_KMNIST_LB_KFACn, prob_correct_out_KMNIST_LB_KFACn, ent_out_KMNIST_LB_KFACn, MMC_out_KMNIST_LB_KFACn, auroc_out_KMNIST_LB_KFACn, 'MNIST', test='KMNIST', method='LB_KFACn')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Laplace Bridge KFAC\n",
    "#seeds are 123,124,125,126,127\n",
    "#NOTE(review): the values below are identical to the diagonal Laplace Bridge cell above --\n",
    "#confirm these are the actual KFAC results and not a copy-paste placeholder\n",
    "time_lpb_in = [0.01302, 0.01731, 0.01489, 0.01529, 0.01574]\n",
    "time_lpb_out_fmnist = [0.01256, 0.01531, 0.01669, 0.01703, 0.01500]\n",
    "time_lpb_out_notmnist = [0.02332, 0.03115, 0.02666, 0.02756, 0.02864]\n",
    "time_lpb_out_kmnist = [0.01287, 0.01520, 0.01636, 0.01687, 0.01438]\n",
    "\n",
    "acc_in = [0.989, 0.989, 0.991, 0.988, 0.989]\n",
    "mmc_in = [0.988, 0.986, 0.988, 0.988, 0.988]\n",
    "mmc_out_fmnist = [0.493, 0.523, 0.433, 0.514, 0.488]\n",
    "mmc_out_notmnist = [0.735, 0.759, 0.756, 0.767, 0.769]\n",
    "mmc_out_kmnist = [0.699, 0.722, 0.707, 0.713, 0.703]\n",
    "\n",
    "auroc_out_fmnist = [0.991, 0.988, 0.993, 0.989, 0.990]\n",
    "auroc_out_notmnist = [0.948, 0.915, 0.938, 0.932, 0.924]\n",
    "auroc_out_kmnist = [0.970, 0.963, 0.969, 0.968, 0.968]\n",
    "\n",
    "print(\"Laplace Bridge KFAC time in: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_in), np.std(time_lpb_in)))\n",
    "print(\"Laplace Bridge KFAC time out fmnist: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_out_fmnist), np.std(time_lpb_out_fmnist)))\n",
    "print(\"Laplace Bridge KFAC time out notmnist: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_out_notmnist), np.std(time_lpb_out_notmnist)))\n",
    "print(\"Laplace Bridge KFAC time out kmnist: {:.03f} with std {:.03f}\".format(np.mean(time_lpb_out_kmnist), np.std(time_lpb_out_kmnist)))\n",
    "\n",
    "print(\"accuracy: {:.03f} with std {:.03f}\".format(np.mean(acc_in), np.std(acc_in)))\n",
    "\n",
    "print(\"MMC in: {:.03f} with std {:.03f}\".format(np.mean(mmc_in), np.std(mmc_in)))\n",
    "print(\"MMC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_fmnist), np.std(mmc_out_fmnist)))\n",
    "print(\"MMC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_notmnist), np.std(mmc_out_notmnist)))\n",
    "print(\"MMC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_kmnist), np.std(mmc_out_kmnist)))\n",
    "\n",
    "print(\"AUROC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_fmnist), np.std(auroc_out_fmnist)))\n",
    "print(\"AUROC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_notmnist), np.std(auroc_out_notmnist)))\n",
    "print(\"AUROC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_kmnist), np.std(auroc_out_kmnist)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Conditions\n",
    "\n",
    "Test the condition derived in Proposition 1 of the paper and evaluated experimentally in Appendix A"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Check for which fraction of inputs the condition from Proposition 1\n",
    "# (evaluated experimentally in Appendix A) holds.\n",
    "\n",
    "def check_condition(alpha_vecs):\n",
    "    \"\"\"Return the fraction of rows of `alpha_vecs` (one Dirichlet parameter\n",
    "    vector per row) whose largest component exceeds the Proposition 1 bound.\n",
    "    Vectorized over rows; expects a 2-D numpy array.\"\"\"\n",
    "    alpha_sum = alpha_vecs.sum(1)\n",
    "    alpha_max = alpha_vecs.max(1)\n",
    "    # sum of all components except the largest one\n",
    "    alpha_sum_minus = alpha_sum - alpha_max\n",
    "    right_side = 0.25 * (np.sqrt(9 * alpha_sum_minus**2 + 10 * alpha_sum_minus + 1) - alpha_sum_minus - 1)\n",
    "    cases = alpha_max > right_side\n",
    "    percentage = np.sum(cases)/len(cases)\n",
    "    return(percentage)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# check_condition already returns a scalar fraction, so no extra np.sum is needed\n",
    "print(check_condition(mnist_test_in_LB))\n",
    "print(check_condition(mnist_test_out_FMNIST_LB))\n",
    "print(check_condition(mnist_test_out_notMNIST_LB))\n",
    "print(check_condition(mnist_test_out_KMNIST_LB))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Compare to extended MacKay approach"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_EMK = predict_extended_MacKay(mnist_model, mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_FMNIST_EMK = predict_extended_MacKay(mnist_model, FMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_notMNIST_EMK = predict_extended_MacKay(mnist_model, not_mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_KMNIST_EMK = predict_extended_MacKay(mnist_model, KMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_EMK, prob_correct_in_EMK, ent_in_EMK, MMC_in_EMK = get_in_dist_values(mnist_test_in_EMK, targets)\n",
    "acc_out_FMNIST_EMK, prob_correct_out_FMNIST_EMK, ent_out_FMNIST_EMK, MMC_out_FMNIST_EMK, auroc_out_FMNIST_EMK = get_out_dist_values(mnist_test_in_EMK, mnist_test_out_FMNIST_EMK, targets_FMNIST)\n",
    "acc_out_notMNIST_EMK, prob_correct_out_notMNIST_EMK, ent_out_notMNIST_EMK, MMC_out_notMNIST_EMK, auroc_out_notMNIST_EMK = get_out_dist_values(mnist_test_in_EMK, mnist_test_out_notMNIST_EMK, targets_notMNIST)\n",
    "acc_out_KMNIST_EMK, prob_correct_out_KMNIST_EMK, ent_out_KMNIST_EMK, MMC_out_KMNIST_EMK, auroc_out_KMNIST_EMK = get_out_dist_values(mnist_test_in_EMK, mnist_test_out_KMNIST_EMK, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print_in_dist_values(acc_in_EMK, prob_correct_in_EMK, ent_in_EMK, MMC_in_EMK, 'MNIST', 'mnist', 'EMK')\n",
    "print_out_dist_values(acc_out_FMNIST_EMK, prob_correct_out_FMNIST_EMK, ent_out_FMNIST_EMK, MMC_out_FMNIST_EMK, auroc_out_FMNIST_EMK, 'MNIST', test='fmnist', method='EMK')\n",
    "print_out_dist_values(acc_out_notMNIST_EMK, prob_correct_out_notMNIST_EMK, ent_out_notMNIST_EMK, MMC_out_notMNIST_EMK, auroc_out_notMNIST_EMK, 'MNIST', test='notMNIST', method='EMK')\n",
    "print_out_dist_values(acc_out_KMNIST_EMK, prob_correct_out_KMNIST_EMK, ent_out_KMNIST_EMK, MMC_out_KMNIST_EMK, auroc_out_KMNIST_EMK, 'MNIST', test='KMNIST', method='EMK')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Extended MacKay\n",
    "#seeds are 123,124,125,126,127\n",
    "time_EMK_in = [0.059017181396484375, 0.055175065994262695, 0.04456186294555664, 0.052222251892089844, 0.05633044242858887]\n",
    "time_EMK_out_fmnist = [0.14669466018676758, 0.06929922103881836, 0.0457310676574707, 0.04770398139953613, 0.04364585876464844]\n",
    "time_EMK_out_notmnist = [0.06481504440307617, 0.08360505104064941, 0.07899594306945801, 0.0765230655670166, 0.09008359909057617]\n",
    "time_EMK_out_kmnist = [0.03671598434448242, 0.03630828857421875, 0.04480934143066406, 0.0473024845123291, 0.04279160499572754]\n",
    "\n",
    "acc_in = [0.989, 0.989, 0.991, 0.988, 0.989]\n",
    "mmc_in = [0.981, 0.978, 0.981, 0.982, 0.981]\n",
    "mmc_out_fmnist = [0.564, 0.577, 0.516, 0.573, 0.556]\n",
    "mmc_out_notmnist = [0.708, 0.741, 0.744, 0.747, 0.744]\n",
    "mmc_out_kmnist = [0.667, 0.681, 0.669, 0.678, 0.668]\n",
    "\n",
    "auroc_out_fmnist = [0.986, 0.982, 0.989, 0.984, 0.986]\n",
    "auroc_out_notmnist = [0.946, 0.909, 0.937, 0.928, 0.922]\n",
    "auroc_out_kmnist = [0.969, 0.962, 0.968, 0.967, 0.967]\n",
    "\n",
    "print(\"Extended MacKay time in: {:.03f} with std {:.03f}\".format(np.mean(time_EMK_in), np.std(time_EMK_in)))\n",
    "print(\"Extended MacKay time out fmnist: {:.03f} with std {:.03f}\".format(np.mean(time_EMK_out_fmnist), np.std(time_EMK_out_fmnist)))\n",
    "print(\"Extended MacKay time out notmnist: {:.03f} with std {:.03f}\".format(np.mean(time_EMK_out_notmnist), np.std(time_EMK_out_notmnist)))\n",
    "print(\"Extended MacKay time out kmnist: {:.03f} with std {:.03f}\".format(np.mean(time_EMK_out_kmnist), np.std(time_EMK_out_kmnist)))\n",
    "\n",
    "print(\"accuracy: {:.03f} with std {:.03f}\".format(np.mean(acc_in), np.std(acc_in)))\n",
    "\n",
    "print(\"MMC in: {:.03f} with std {:.03f}\".format(np.mean(mmc_in), np.std(mmc_in)))\n",
    "print(\"MMC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_fmnist), np.std(mmc_out_fmnist)))\n",
    "print(\"MMC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_notmnist), np.std(mmc_out_notmnist)))\n",
    "print(\"MMC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_kmnist), np.std(mmc_out_kmnist)))\n",
    "\n",
    "print(\"AUROC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_fmnist), np.std(auroc_out_fmnist)))\n",
    "print(\"AUROC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_notmnist), np.std(auroc_out_notmnist)))\n",
    "print(\"AUROC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_kmnist), np.std(auroc_out_kmnist)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Compare to Second-order Delta Posterior Predictive\n",
    "\n",
    "as detailed in Appendix D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mnist_test_in_SODPP = predict_SODPP(mnist_model, mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_FMNIST_SODPP = predict_SODPP(mnist_model, FMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_notMNIST_SODPP = predict_SODPP(mnist_model, not_mnist_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()\n",
    "mnist_test_out_KMNIST_SODPP = predict_SODPP(mnist_model, KMNIST_test_loader, M_W_post_D, M_b_post_D, C_W_post_D, C_b_post_D, verbose=False, cuda=True, timing=True).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_in_SODPP, prob_correct_in_SODPP, ent_in_SODPP, MMC_in_SODPP = get_in_dist_values(mnist_test_in_SODPP, targets)\n",
    "acc_out_FMNIST_SODPP, prob_correct_out_FMNIST_SODPP, ent_out_FMNIST_SODPP, MMC_out_FMNIST_SODPP, auroc_out_FMNIST_SODPP = get_out_dist_values(mnist_test_in_SODPP, mnist_test_out_FMNIST_SODPP, targets_FMNIST)\n",
    "acc_out_notMNIST_SODPP, prob_correct_out_notMNIST_SODPP, ent_out_notMNIST_SODPP, MMC_out_notMNIST_SODPP, auroc_out_notMNIST_SODPP = get_out_dist_values(mnist_test_in_SODPP, mnist_test_out_notMNIST_SODPP, targets_notMNIST)\n",
    "acc_out_KMNIST_SODPP, prob_correct_out_KMNIST_SODPP, ent_out_KMNIST_SODPP, MMC_out_KMNIST_SODPP, auroc_out_KMNIST_SODPP = get_out_dist_values(mnist_test_in_SODPP, mnist_test_out_KMNIST_SODPP, targets_KMNIST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print_in_dist_values(acc_in_SODPP, prob_correct_in_SODPP, ent_in_SODPP, MMC_in_SODPP, 'MNIST', 'mnist', 'SODPP')\n",
    "print_out_dist_values(acc_out_FMNIST_SODPP, prob_correct_out_FMNIST_SODPP, ent_out_FMNIST_SODPP, MMC_out_FMNIST_SODPP, auroc_out_FMNIST_SODPP, 'MNIST', test='fmnist', method='SODPP')\n",
    "print_out_dist_values(acc_out_notMNIST_SODPP, prob_correct_out_notMNIST_SODPP, ent_out_notMNIST_SODPP, MMC_out_notMNIST_SODPP, auroc_out_notMNIST_SODPP, 'MNIST', test='notMNIST', method='SODPP')\n",
    "print_out_dist_values(acc_out_KMNIST_SODPP, prob_correct_out_KMNIST_SODPP, ent_out_KMNIST_SODPP, MMC_out_KMNIST_SODPP, auroc_out_KMNIST_SODPP, 'MNIST', test='KMNIST', method='SODPP')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#SODPP\n",
    "#seeds are 123,124,125,126,127\n",
    "time_SODPP_in = [0.022600173950195312, 0.022752046585083008, 0.027388811111450195, 0.0277860164642334, 0.028126955032348633]\n",
    "time_SODPP_out_fmnist = [0.02283191680908203, 0.022715091705322266, 0.02962970733642578, 0.030440807342529297, 0.028356552124023438]\n",
    "time_SODPP_out_notmnist = [0.040780067443847656, 0.04070425033569336, 0.04935407638549805, 0.05025911331176758, 0.051819801330566406]\n",
    "time_SODPP_out_kmnist = [0.021996021270751953, 0.022888660430908203, 0.028764009475708008, 0.030774354934692383, 0.02728438377380371]\n",
    "\n",
    "acc_in = [0.989, 0.989, 0.990, 0.987, 0.990]\n",
    "mmc_in = [0.980, 0.977, 0.979, 0.980, 0.979]\n",
    "mmc_out_fmnist = [0.552, 0.565, 0.506, 0.561, 0.545]\n",
    "mmc_out_notmnist = [0.681, 0.719, 0.708, 0.724, 0.717]\n",
    "mmc_out_kmnist = [0.636, 0.648, 0.636, 0.648, 0.636]\n",
    "\n",
    "auroc_out_fmnist = [0.984, 0.979, 0.987, 0.982, 0.984]\n",
    "auroc_out_notmnist = [0.945, 0.910, 0.935, 0.928, 0.921]\n",
    "auroc_out_kmnist = [0.968, 0.960, 0.967, 0.966, 0.966]\n",
    "\n",
    "print(\"SODPP time in: {:.03f} with std {:.03f}\".format(np.mean(time_SODPP_in), np.std(time_SODPP_in)))\n",
    "print(\"SODPP time out fmnist: {:.03f} with std {:.03f}\".format(np.mean(time_SODPP_out_fmnist), np.std(time_SODPP_out_fmnist)))\n",
    "print(\"SODPP time out notmnist: {:.03f} with std {:.03f}\".format(np.mean(time_SODPP_out_notmnist), np.std(time_SODPP_out_notmnist)))\n",
    "print(\"SODPP time out kmnist: {:.03f} with std {:.03f}\".format(np.mean(time_SODPP_out_kmnist), np.std(time_SODPP_out_kmnist)))\n",
    "\n",
    "print(\"accuracy: {:.03f} with std {:.03f}\".format(np.mean(acc_in), np.std(acc_in)))\n",
    "\n",
    "print(\"MMC in: {:.03f} with std {:.03f}\".format(np.mean(mmc_in), np.std(mmc_in)))\n",
    "print(\"MMC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_fmnist), np.std(mmc_out_fmnist)))\n",
    "print(\"MMC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_notmnist), np.std(mmc_out_notmnist)))\n",
    "print(\"MMC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(mmc_out_kmnist), np.std(mmc_out_kmnist)))\n",
    "\n",
    "print(\"AUROC out fmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_fmnist), np.std(auroc_out_fmnist)))\n",
    "print(\"AUROC out notmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_notmnist), np.std(auroc_out_notmnist)))\n",
    "print(\"AUROC out kmnist: {:.03f} with std {:.03f}\".format(np.mean(auroc_out_kmnist), np.std(auroc_out_kmnist)))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
