{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "MF7BncmmLBeO"
   },
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "from sklearn.datasets import load_digits\n",
    "from sklearn import datasets\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**DISCLAIMER**\n",
    "\n",
     "The presented code is not optimized, it serves an educational purpose. It is written for CPU, it uses only fully-connected networks and an extremely simplistic dataset. However, it contains all components that can help to understand how priors in a Variational Auto-Encoder (VAE) work, and it should be rather easy to extend it to more sophisticated models. This code can be run on almost any laptop/PC, and it takes at most a couple of minutes to get the result."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "RKsmjLumL5A2"
   },
   "source": [
    "## Dataset: Digits"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "In this example, we go wild and use a dataset that is simpler than MNIST! We use a scikit-learn dataset called Digits. It consists of ~1500 images of size 8x8, and each pixel can take values in $\\{0, 1, \\ldots, 16\\}$.\n",
    "\n",
    "The goal of using this dataset is that everyone can run it on a laptop, without any gpu etc."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "hSWUnXAYLLif"
   },
   "outputs": [],
   "source": [
    "class Digits(Dataset):\n",
    "    \"\"\"Scikit-Learn Digits dataset.\"\"\"\n",
    "\n",
    "    def __init__(self, mode='train', transforms=None):\n",
    "        digits = load_digits()\n",
    "        if mode == 'train':\n",
    "            self.data = digits.data[:1000].astype(np.float32)\n",
    "        elif mode == 'val':\n",
    "            self.data = digits.data[1000:1350].astype(np.float32)\n",
    "        else:\n",
    "            self.data = digits.data[1350:].astype(np.float32)\n",
    "\n",
    "        self.transforms = transforms\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        sample = self.data[idx]\n",
    "        if self.transforms:\n",
    "            sample = self.transforms(sample)\n",
    "        return sample"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "xQyrkrqAL7p8"
   },
   "source": [
    "## Auxiliary functions and classes"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "tw00sH-6L9yg"
   },
   "source": [
    "### Distributions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "AJh8NiXxLNf9"
   },
   "outputs": [],
   "source": [
    "PI = torch.from_numpy(np.asarray(np.pi))\n",
    "EPS = 1.e-5\n",
    "\n",
    "def log_categorical(x, p, num_classes=256, reduction=None, dim=None):\n",
    "    x_one_hot = F.one_hot(x.long(), num_classes=num_classes)\n",
    "    log_p = x_one_hot * torch.log(torch.clamp(p, EPS, 1. - EPS))\n",
    "    if reduction == 'avg':\n",
    "        return torch.mean(log_p, dim)\n",
    "    elif reduction == 'sum':\n",
    "        return torch.sum(log_p, dim)\n",
    "    else:\n",
    "        return log_p\n",
    "\n",
    "def log_bernoulli(x, p, reduction=None, dim=None):\n",
    "    pp = torch.clamp(p, EPS, 1. - EPS)\n",
    "    log_p = x * torch.log(pp) + (1. - x) * torch.log(1. - pp)\n",
    "    if reduction == 'avg':\n",
    "        return torch.mean(log_p, dim)\n",
    "    elif reduction == 'sum':\n",
    "        return torch.sum(log_p, dim)\n",
    "    else:\n",
    "        return log_p\n",
    "\n",
    "def log_normal_diag(x, mu, log_var, reduction=None, dim=None):\n",
    "    log_p = -0.5 * torch.log(2. * PI) - 0.5 * log_var - 0.5 * torch.exp(-log_var) * (x - mu)**2.\n",
    "    if reduction == 'avg':\n",
    "        return torch.mean(log_p, dim)\n",
    "    elif reduction == 'sum':\n",
    "        return torch.sum(log_p, dim)\n",
    "    else:\n",
    "        return log_p\n",
    "\n",
    "def log_standard_normal(x, reduction=None, dim=None):\n",
    "    log_p = -0.5 * torch.log(2. * PI) - 0.5 * x**2.\n",
    "    if reduction == 'avg':\n",
    "        return torch.mean(log_p, dim)\n",
    "    elif reduction == 'sum':\n",
    "        return torch.sum(log_p, dim)\n",
    "    else:\n",
    "        return log_p\n",
    "\n",
    "# Chakraborty & Chakravarty, \"A new discrete probability distribution with integer support on (−∞, ∞)\",\n",
    "#  Communications in Statistics - Theory and Methods, 45:2, 492-505, DOI: 10.1080/03610926.2013.830743\n",
    "\n",
    "def log_min_exp(a, b, epsilon=1e-8):\n",
    "    \"\"\"\n",
    "    Source: https://github.com/jornpeters/integer_discrete_flows\n",
    "    Computes the log of exp(a) - exp(b) in a (more) numerically stable fashion.\n",
    "    Using:\n",
    "    log(exp(a) - exp(b))\n",
    "    c + log(exp(a-c) - exp(b-c))\n",
    "    a + log(1 - exp(b-a))\n",
    "    And note that we assume b < a always.\n",
    "    \"\"\"\n",
    "    y = a + torch.log(1 - torch.exp(b - a) + epsilon)\n",
    "\n",
    "    return y\n",
    "\n",
    "def log_integer_probability(x, mean, logscale):\n",
    "    scale = torch.exp(logscale)\n",
    "\n",
    "    logp = log_min_exp(\n",
    "      F.logsigmoid((x + 0.5 - mean) / scale),\n",
    "      F.logsigmoid((x - 0.5 - mean) / scale))\n",
    "\n",
    "    return logp\n",
    "\n",
    "def log_integer_probability_standard(x):\n",
    "    logp = log_min_exp(\n",
    "      F.logsigmoid(x + 0.5),\n",
    "      F.logsigmoid(x - 0.5))\n",
    "\n",
    "    return logp"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "krAtPNvEMBWh"
   },
   "source": [
    "### Rounding (Straight-Through Estimator)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "kgRXYSfeLPwp"
   },
   "outputs": [],
   "source": [
    "# Source: https://github.com/jornpeters/integer_discrete_flows\n",
    "class RoundStraightThrough(torch.autograd.Function):\n",
    "    \n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "    @staticmethod\n",
    "    def forward(ctx, input):\n",
    "        rounded = torch.round(input, out=None)\n",
    "        return rounded\n",
    "\n",
    "    @staticmethod\n",
    "    def backward(ctx, grad_output):\n",
    "        grad_input = grad_output.clone()\n",
    "        return grad_input"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "qSP2qiMqMICK"
   },
   "source": [
    "## Variational Auto-Encoder"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-xPJCTrsMPZY"
   },
   "source": [
    "### Encoder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "0VJfbUt2LRXi"
   },
   "outputs": [],
   "source": [
    "class Encoder(nn.Module):\n",
    "    def __init__(self, encoder_net):\n",
    "        super(Encoder, self).__init__()\n",
    "\n",
    "        self.encoder = encoder_net\n",
    "\n",
    "        self.round = RoundStraightThrough.apply\n",
    "\n",
    "    def reparameterization(self, mu_e, log_var_e):\n",
    "        std = torch.exp(0.5*log_var_e)\n",
    "        eps = torch.randn_like(std)\n",
    "        return mu_e + std * eps\n",
    "\n",
    "    def encode(self, x):\n",
    "        h_e = self.encoder(x)\n",
    "        mu_e, log_var_e = torch.chunk(h_e, 2, dim=1)\n",
    "        return mu_e, log_var_e\n",
    "\n",
    "    def sample(self, x=None, mu_e=None, log_var_e=None):\n",
    "        if (mu_e is None) and (log_var_e is None):\n",
    "            mu_e, log_var_e = self.encode(x)\n",
    "        else:\n",
    "            if (mu_e is None) or (log_var_e is None):\n",
    "                raise ValueError('mu and log-scale can`t be None!')\n",
    "            z = self.reparameterization(mu_e, log_var_e)\n",
    "        return z\n",
    "\n",
    "    def log_prob(self, x=None, mu_e=None, log_var_e=None, z=None):\n",
    "        if x is not None:\n",
    "            mu_e, log_var_e = self.encode(x)\n",
    "            z = self.sample(mu_e=mu_e, log_var_e=log_var_e)\n",
    "        else:\n",
    "            if (mu_e is None) or (log_var_e is None) or (z is None):\n",
    "                raise ValueError('mu, log-scale and z can`t be None!')\n",
    "\n",
    "        return log_normal_diag(z, mu_e, log_var_e)\n",
    "\n",
    "    def forward(self, x, type='log_prob'):\n",
    "        assert type in ['encode', 'log_prob'], 'Type could be either encode or log_prob'\n",
    "        if type == 'log_prob':\n",
    "            return self.log_prob(x)\n",
    "        else:\n",
    "            return self.sample(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "znlDY599MRLy"
   },
   "source": [
    "### Decoder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "x5Q8Iz41LTAj"
   },
   "outputs": [],
   "source": [
    "class Decoder(nn.Module):\n",
    "    def __init__(self, decoder_net, distribution='categorical', num_vals=None):\n",
    "        super(Decoder, self).__init__()\n",
    "\n",
    "        self.decoder = decoder_net\n",
    "        self.distribution = distribution\n",
    "        self.num_vals=num_vals\n",
    "\n",
    "    def decode(self, z):\n",
    "        h_d = self.decoder(z)\n",
    "\n",
    "        if self.distribution == 'categorical':\n",
    "            b = h_d.shape[0]\n",
    "            d = h_d.shape[1]//self.num_vals\n",
    "            h_d = h_d.view(b, d, self.num_vals)\n",
    "            mu_d = torch.softmax(h_d, 2)\n",
    "            return [mu_d]\n",
    "\n",
    "        elif self.distribution == 'bernoulli':\n",
    "            mu_d = torch.sigmoid(h_d)\n",
    "            return [mu_d]\n",
    "\n",
    "        else:\n",
    "            raise ValueError('Either `categorical` or `bernoulli`')\n",
    "\n",
    "    def sample(self, z):\n",
    "        outs = self.decode(z)\n",
    "\n",
    "        if self.distribution == 'categorical':\n",
    "            mu_d = outs[0]\n",
    "            b = mu_d.shape[0]\n",
    "            m = mu_d.shape[1]\n",
    "            mu_d = mu_d.view(mu_d.shape[0], -1, self.num_vals)\n",
    "            p = mu_d.view(-1, self.num_vals)\n",
    "            x_new = torch.multinomial(p, num_samples=1).view(b, m)\n",
    "\n",
    "        elif self.distribution == 'bernoulli':\n",
    "            mu_d = outs[0]\n",
    "            x_new = torch.bernoulli(mu_d)\n",
    "\n",
    "        else:\n",
    "            raise ValueError('Either `categorical` or `bernoulli`')\n",
    "\n",
    "        return x_new\n",
    "\n",
    "    def log_prob(self, x, z):\n",
    "        outs = self.decode(z)\n",
    "\n",
    "        if self.distribution == 'categorical':\n",
    "            mu_d = outs[0]\n",
    "            log_p = log_categorical(x, mu_d, num_classes=self.num_vals, reduction='sum', dim=-1).sum(-1)\n",
    "\n",
    "        elif self.distribution == 'bernoulli':\n",
    "            mu_d = outs[0]\n",
    "            log_p = log_bernoulli(x, mu_d, reduction='sum', dim=-1)\n",
    "\n",
    "        else:\n",
    "            raise ValueError('Either `categorical` or `bernoulli`')\n",
    "\n",
    "        return log_p\n",
    "\n",
    "    def forward(self, z, x=None, type='log_prob'):\n",
    "        assert type in ['decoder', 'log_prob'], 'Type could be either decode or log_prob'\n",
    "        if type == 'log_prob':\n",
    "            return self.log_prob(x, z)\n",
    "        else:\n",
    "            return self.sample(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "m_fYlZJiMSih"
   },
   "source": [
    "### GTM Prior"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "xJYhNXulLUhz"
   },
   "outputs": [],
   "source": [
    "class GTMPrior(nn.Module):\n",
    "    def __init__(self, L, gtm_net, num_components, u_min=-1., u_max=1.):\n",
    "        super(GTMPrior, self).__init__()\n",
    "\n",
    "        self.L = L\n",
    "\n",
    "        # 2D manifold\n",
    "        self.u = torch.zeros(num_components**2, 2) # K**2 x 2\n",
    "        u1 = torch.linspace(u_min, u_max, steps=num_components)\n",
    "        u2 = torch.linspace(u_min, u_max, steps=num_components)\n",
    "\n",
    "        k = 0\n",
    "        for i in range(num_components):\n",
    "            for j in range(num_components):\n",
    "                self.u[k,0] = u1[i]\n",
    "                self.u[k,1] = u2[j]\n",
    "                k = k + 1\n",
    "\n",
    "        # gtm network: u -> z\n",
    "        self.gtm_net = gtm_net\n",
    "\n",
    "        # mixing weights\n",
    "        self.w = nn.Parameter(torch.zeros(num_components**2, 1, 1))\n",
    "\n",
    "    def get_params(self):\n",
    "        # u->z\n",
    "        h_gtm = self.gtm_net(self.u) #K**2 x 2L\n",
    "        mean_gtm, logvar_gtm = torch.chunk(h_gtm, 2, dim=1) # K**2 x L and K**2 x L\n",
    "        return mean_gtm, logvar_gtm\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        # u->z\n",
    "        mean_gtm, logvar_gtm = self.get_params()\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0)\n",
    "        w = w.squeeze()\n",
    "\n",
    "        # pick components\n",
    "        indexes = torch.multinomial(w, batch_size, replacement=True)\n",
    "\n",
    "        # means and logvars\n",
    "        eps = torch.randn(batch_size, self.L)\n",
    "        for i in range(batch_size):\n",
    "            indx = indexes[i]\n",
    "            if i == 0:\n",
    "                z = mean_gtm[[indx]] + eps[[i]] * torch.exp(logvar_gtm[[indx]])\n",
    "            else:\n",
    "                z = torch.cat((z, mean_gtm[[indx]] + eps[[i]] * torch.exp(logvar_gtm[[indx]])), 0)\n",
    "        return z\n",
    "\n",
    "    def log_prob(self, z):\n",
    "        # u->z\n",
    "        mean_gtm, logvar_gtm = self.get_params()\n",
    "\n",
    "        # log-mixture-of-Gaussians\n",
    "        z = z.unsqueeze(0) # 1 x B x L\n",
    "        mean_gtm = mean_gtm.unsqueeze(1) # K**2 x 1 x L\n",
    "        logvar_gtm = logvar_gtm.unsqueeze(1) # K**2 x 1 x L\n",
    "\n",
    "        w = F.softmax(self.w, dim=0)\n",
    "\n",
    "        log_p = log_normal_diag(z, mean_gtm, logvar_gtm) + torch.log(w) # K**2 x B x L\n",
    "        log_prob = torch.logsumexp(log_p, dim=0, keepdim=False) # B x L\n",
    "\n",
    "        return log_prob"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "IGuCJH0nrW_R"
   },
   "source": [
    "### VampPrior"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "XepJV-O4rZFn"
   },
   "outputs": [],
   "source": [
    "class VampPrior(nn.Module):\n",
    "    def __init__(self, L, D, num_vals, encoder, num_components, data=None):\n",
    "        super(VampPrior, self).__init__()\n",
    "\n",
    "        self.L = L\n",
    "        self.D = D\n",
    "        self.num_vals = num_vals\n",
    "\n",
    "        self.encoder = encoder\n",
    "\n",
    "        # pseudoinputs\n",
    "        u = torch.rand(num_components, D) * self.num_vals\n",
    "        self.u = nn.Parameter(u)\n",
    "\n",
    "        # mixing weights\n",
    "        self.w = nn.Parameter(torch.zeros(self.u.shape[0], 1, 1)) # K x 1 x 1\n",
    "\n",
    "    def get_params(self):\n",
    "        # u->encoder->mu, lof_var\n",
    "        mean_vampprior, logvar_vampprior = self.encoder.encode(self.u) #(K x L), (K x L)\n",
    "        return mean_vampprior, logvar_vampprior\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        # u->encoder->mu, lof_var\n",
    "        mean_vampprior, logvar_vampprior = self.get_params()\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0) # K x 1 x 1 \n",
    "        w = w.squeeze()\n",
    "\n",
    "        # pick components\n",
    "        indexes = torch.multinomial(w, batch_size, replacement=True)\n",
    "\n",
    "        # means and logvars\n",
    "        eps = torch.randn(batch_size, self.L)\n",
    "        for i in range(batch_size):\n",
    "            indx = indexes[i]\n",
    "            if i == 0:\n",
    "                z = mean_vampprior[[indx]] + eps[[i]] * torch.exp(logvar_vampprior[[indx]])\n",
    "            else:\n",
    "                z = torch.cat((z, mean_vampprior[[indx]] + eps[[i]] * torch.exp(logvar_vampprior[[indx]])), 0)\n",
    "        return z\n",
    "\n",
    "    def log_prob(self, z):\n",
    "        # u->encoder->mu, lof_var\n",
    "        mean_vampprior, logvar_vampprior = self.get_params() # (K x L) & (K x L)\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0) # K x 1 x 1\n",
    "\n",
    "        # log-mixture-of-Gaussians\n",
    "        z = z.unsqueeze(0) # 1 x B x L\n",
    "        mean_vampprior = mean_vampprior.unsqueeze(1) # K x 1 x L\n",
    "        logvar_vampprior = logvar_vampprior.unsqueeze(1) # K x 1 x L\n",
    "\n",
    "        log_p = log_normal_diag(z, mean_vampprior, logvar_vampprior) + torch.log(w) # K x B x L\n",
    "        log_prob = torch.logsumexp(log_p, dim=0, keepdim=False) # B x L\n",
    "\n",
    "        return log_prob # B "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "s-w76ECk6KrX"
   },
   "source": [
    "### VampPrior + GTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "lv3plbyo6NbP"
   },
   "outputs": [],
   "source": [
    "class GTMVampPrior(nn.Module):\n",
    "    def __init__(self, L, D, gtm_net, encoder, num_points, u_min=-10., u_max=10., num_vals=255):\n",
    "        super(GTMVampPrior, self).__init__()\n",
    "\n",
    "        self.L = L\n",
    "        self.D = D\n",
    "        self.num_vals = num_vals\n",
    "\n",
    "        self.encoder = encoder\n",
    "\n",
    "        # 2D manifold\n",
    "        self.u = torch.zeros(num_points**2, 2) # K**2 x 2\n",
    "        u1 = torch.linspace(u_min, u_max, steps=num_points)\n",
    "        u2 = torch.linspace(u_min, u_max, steps=num_points)\n",
    "\n",
    "        k = 0\n",
    "        for i in range(num_points):\n",
    "            for j in range(num_points):\n",
    "                self.u[k,0] = u1[i]\n",
    "                self.u[k,1] = u2[j]\n",
    "                k = k + 1\n",
    "\n",
    "        # gtm network: u -> x\n",
    "        self.gtm_net = gtm_net\n",
    "\n",
    "        # mixing weights\n",
    "        self.w = nn.Parameter(torch.zeros(num_points**2, 1, 1))\n",
    "\n",
    "    def get_params(self):\n",
    "        # u->gtm_net->u_x\n",
    "        h_gtm = self.gtm_net(self.u) #K x D\n",
    "        h_gtm = h_gtm * self.num_vals\n",
    "        # u_x->encoder->mu, lof_var\n",
    "        mean_vampprior, logvar_vampprior = self.encoder.encode(h_gtm) #(K x L), (K x L)\n",
    "        return mean_vampprior, logvar_vampprior\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        # u->encoder->mu, lof_var\n",
    "        mean_vampprior, logvar_vampprior = self.get_params()\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0)\n",
    "        w = w.squeeze()\n",
    "\n",
    "        # pick components\n",
    "        indexes = torch.multinomial(w, batch_size, replacement=True)\n",
    "\n",
    "        # means and logvars\n",
    "        eps = torch.randn(batch_size, self.L)\n",
    "        for i in range(batch_size):\n",
    "            indx = indexes[i]\n",
    "            if i == 0:\n",
    "                z = mean_vampprior[[indx]] + eps[[i]] * torch.exp(logvar_vampprior[[indx]])\n",
    "            else:\n",
    "                z = torch.cat((z, mean_vampprior[[indx]] + eps[[i]] * torch.exp(logvar_vampprior[[indx]])), 0)\n",
    "        return z\n",
    "\n",
    "    def log_prob(self, z):\n",
    "        # u->encoder->mu, lof_var\n",
    "        mean_vampprior, logvar_vampprior = self.get_params()\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0)\n",
    "\n",
    "        # log-mixture-of-Gaussians\n",
    "        z = z.unsqueeze(0) # 1 x B x L\n",
    "        mean_vampprior = mean_vampprior.unsqueeze(1) # K x 1 x L\n",
    "        logvar_vampprior = logvar_vampprior.unsqueeze(1) # K x 1 x L\n",
    "\n",
    "        log_p = log_normal_diag(z, mean_vampprior, logvar_vampprior) + torch.log(w) # K x B x L\n",
    "        log_prob = torch.logsumexp(log_p, dim=0, keepdim=False) # B x L\n",
    "\n",
    "        return log_prob"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "SxwS02IsB4Il"
   },
   "source": [
    "### Mixture of Gaussians"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ByxkEv4-B6Sl"
   },
   "outputs": [],
   "source": [
    "class MoGPrior(nn.Module):\n",
    "    def __init__(self, L, num_components):\n",
    "        super(MoGPrior, self).__init__()\n",
    "\n",
    "        self.L = L\n",
    "        self.num_components = num_components\n",
    "\n",
    "        # params\n",
    "        self.means = nn.Parameter(torch.randn(num_components, self.L)*multiplier)\n",
    "        self.logvars = nn.Parameter(torch.randn(num_components, self.L))\n",
    "\n",
    "        # mixing weights\n",
    "        self.w = nn.Parameter(torch.zeros(num_components, 1, 1))\n",
    "\n",
    "    def get_params(self):\n",
    "        return self.means, self.logvars\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        # mu, lof_var\n",
    "        means, logvars = self.get_params()\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0)\n",
    "        w = w.squeeze()\n",
    "\n",
    "        # pick components\n",
    "        indexes = torch.multinomial(w, batch_size, replacement=True)\n",
    "\n",
    "        # means and logvars\n",
    "        eps = torch.randn(batch_size, self.L)\n",
    "        for i in range(batch_size):\n",
    "            indx = indexes[i]\n",
    "            if i == 0:\n",
    "                z = means[[indx]] + eps[[i]] * torch.exp(logvars[[indx]])\n",
    "            else:\n",
    "                z = torch.cat((z, means[[indx]] + eps[[i]] * torch.exp(logvars[[indx]])), 0)\n",
    "        return z\n",
    "\n",
    "    def log_prob(self, z):\n",
    "        # mu, lof_var\n",
    "        means, logvars = self.get_params()\n",
    "\n",
    "        # mixing probabilities\n",
    "        w = F.softmax(self.w, dim=0)\n",
    "\n",
    "        # log-mixture-of-Gaussians\n",
    "        z = z.unsqueeze(0) # 1 x B x L\n",
    "        means = means.unsqueeze(1) # K x 1 x L\n",
    "        logvars = logvars.unsqueeze(1) # K x 1 x L\n",
    "\n",
    "        log_p = log_normal_diag(z, means, logvars) + torch.log(w) # K x B x L\n",
    "        log_prob = torch.logsumexp(log_p, dim=0, keepdim=False) # B x L\n",
    "\n",
    "        return log_prob"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4UL9-AjVP-0d"
   },
   "source": [
    "### Standard Gaussian"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "4LJZIPpFQAo9"
   },
   "outputs": [],
   "source": [
    "class StandardPrior(nn.Module):\n",
    "    def __init__(self, L=2):\n",
    "        super(StandardPrior, self).__init__()\n",
    "\n",
    "        self.L = L \n",
    "\n",
    "        # params weights\n",
    "        self.means = torch.zeros(1, L)\n",
    "        self.logvars = torch.zeros(1, L)\n",
    "\n",
    "    def get_params(self):\n",
    "        return self.means, self.logvars\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        return torch.randn(batch_size, self.L)\n",
    "    \n",
    "    def log_prob(self, z):\n",
    "        return log_standard_normal(z)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "rPyZATGlsbvP"
   },
   "source": [
    "### Flow-based prior"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ms4wsT8OtN3f"
   },
   "outputs": [],
   "source": [
    "class FlowPrior(nn.Module):\n",
    "    def __init__(self, nets, nett, num_flows, D=2):\n",
    "        super(FlowPrior, self).__init__()\n",
    "\n",
    "        self.D = D\n",
    "\n",
    "        self.t = torch.nn.ModuleList([nett() for _ in range(num_flows)])\n",
    "        self.s = torch.nn.ModuleList([nets() for _ in range(num_flows)])\n",
    "        self.num_flows = num_flows\n",
    "\n",
    "    def coupling(self, x, index, forward=True):\n",
    "        (xa, xb) = torch.chunk(x, 2, 1)\n",
    "\n",
    "        s = self.s[index](xa)\n",
    "        t = self.t[index](xa)\n",
    "\n",
    "        if forward:\n",
    "            #yb = f^{-1}(x)\n",
    "            yb = (xb - t) * torch.exp(-s)\n",
    "        else:\n",
    "            #xb = f(y)\n",
    "            yb = torch.exp(s) * xb + t\n",
    "\n",
    "        return torch.cat((xa, yb), 1), s\n",
    "\n",
    "    def permute(self, x):\n",
    "        return x.flip(1)\n",
    "\n",
    "    def f(self, x):\n",
    "        log_det_J, z = x.new_zeros(x.shape[0]), x\n",
    "        for i in range(self.num_flows):\n",
    "            z, s = self.coupling(z, i, forward=True)\n",
    "            z = self.permute(z)\n",
    "            log_det_J = log_det_J - s.sum(dim=1)\n",
    "\n",
    "        return z, log_det_J\n",
    "\n",
    "    def f_inv(self, z):\n",
    "        x = z\n",
    "        for i in reversed(range(self.num_flows)):\n",
    "            x = self.permute(x)\n",
    "            x, _ = self.coupling(x, i, forward=False)\n",
    "\n",
    "        return x\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        z = torch.randn(batch_size, self.D)\n",
    "        x = self.f_inv(z)\n",
    "        return x.view(-1, self.D)\n",
    "\n",
    "    def log_prob(self, x):\n",
    "        z, log_det_J = self.f(x)\n",
    "        log_p = (log_standard_normal(z) + log_det_J.unsqueeze(1))\n",
    "        return log_p"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "sCoRJeYpMToZ"
   },
   "source": [
    "### VAE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**NOTE** Please notice that here we pass an object of the encoder and an object of the decoder instead of neural networks. This is a difference comparing to the previous code of the VAE."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "GRYA6JA4LWEC"
   },
   "outputs": [],
   "source": [
    "class VAE(nn.Module):\n",
    "    def __init__(self, encoder, decoder, prior, num_vals=256, L=16, likelihood_type='categorical'):\n",
    "        super(VAE, self).__init__()\n",
    "\n",
    "        print('VAE by JT.')\n",
    "\n",
    "        self.encoder = encoder\n",
    "        self.decoder = decoder\n",
    "        self.prior = prior\n",
    "\n",
    "        self.num_vals = num_vals\n",
    "\n",
    "        self.likelihood_type = likelihood_type\n",
    "\n",
    "    def forward(self, x, reduction='avg'):\n",
    "        # encoder\n",
    "        mu_e, log_var_e = self.encoder.encode(x)\n",
    "        z = self.encoder.sample(mu_e=mu_e, log_var_e=log_var_e)\n",
    "\n",
    "        # ELBO\n",
    "        RE = self.decoder.log_prob(x, z)\n",
    "        KL = (self.prior.log_prob(z) - self.encoder.log_prob(mu_e=mu_e, log_var_e=log_var_e, z=z)).sum(-1)\n",
    "\n",
    "        error = 0\n",
    "        if np.isnan(RE.detach().numpy()).any():\n",
    "            print('RE {}'.format(RE))\n",
    "            error = 1\n",
    "        if np.isnan(KL.detach().numpy()).any():\n",
    "            print('RE {}'.format(KL))\n",
    "            error = 1\n",
    "\n",
    "        if error == 1:\n",
    "            raise ValueError()\n",
    "\n",
    "        if reduction == 'sum':\n",
    "            return -(RE + KL).sum()\n",
    "        else:\n",
    "            return -(RE + KL).mean()\n",
    "\n",
    "    def sample(self, batch_size=64):\n",
    "        z = self.prior.sample(batch_size=batch_size)\n",
    "        return self.decoder.sample(z)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "vUoPkTmrMVnx"
   },
   "source": [
    "## Evaluation and Training functions"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "JvwmRoi7MVto"
   },
   "source": [
    "**Evaluation step, sampling and curve plotting**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "JHx4RIqDLZe9"
   },
   "outputs": [],
   "source": [
    "def evaluation(test_loader, name=None, model_best=None, epoch=None):\n",
    "    # EVALUATION\n",
    "    if model_best is None:\n",
    "        # load best performing model\n",
    "        model_best = torch.load(name + '.model')\n",
    "\n",
    "    model_best.eval()\n",
    "    loss = 0.\n",
    "    N = 0.\n",
    "    for indx_batch, test_batch in enumerate(test_loader):\n",
    "        loss_t = model_best.forward(test_batch, reduction='sum')\n",
    "        loss = loss + loss_t.item()\n",
    "        N = N + test_batch.shape[0]\n",
    "    loss = loss / N\n",
    "\n",
    "    if epoch is None:\n",
    "        print(f'FINAL LOSS: nll={loss}')\n",
    "    else:\n",
    "        print(f'Epoch: {epoch}, val nll={loss}')\n",
    "\n",
    "    return loss\n",
    "\n",
    "\n",
    "def samples_real(name, test_loader):\n",
    "    # REAL-------\n",
    "    num_x = 4\n",
    "    num_y = 4\n",
    "    x = next(iter(test_loader)).detach().numpy()\n",
    "\n",
    "    fig, ax = plt.subplots(num_x, num_y)\n",
    "    for i, ax in enumerate(ax.flatten()):\n",
    "        plottable_image = np.reshape(x[i], (8, 8))\n",
    "        ax.imshow(plottable_image, cmap='gray')\n",
    "        ax.axis('off')\n",
    "\n",
    "    plt.savefig(name+'_real_images.pdf', bbox_inches='tight')\n",
    "    plt.close()\n",
    "\n",
    "\n",
    "def samples_generated(name, data_loader, extra_name=''):\n",
    "    x = next(iter(data_loader)).detach().numpy()\n",
    "\n",
    "    # GENERATIONS-------\n",
    "    model_best = torch.load(name + '.model')\n",
    "    model_best.eval()\n",
    "\n",
    "    num_x = 4\n",
    "    num_y = 4\n",
    "    x = model_best.sample(num_x * num_y)\n",
    "    x = x.detach().numpy()\n",
    "\n",
    "    fig, ax = plt.subplots(num_x, num_y)\n",
    "    for i, ax in enumerate(ax.flatten()):\n",
    "        plottable_image = np.reshape(x[i], (8, 8))\n",
    "        ax.imshow(plottable_image, cmap='gray')\n",
    "        ax.axis('off')\n",
    "\n",
    "    plt.savefig(name + '_generated_images' + extra_name + '.pdf', bbox_inches='tight')\n",
    "    plt.close()\n",
    "\n",
    "\n",
    "def plot_curve(name, nll_val):\n",
    "    plt.plot(np.arange(len(nll_val)), nll_val, linewidth='3')\n",
    "    plt.xlabel('epochs')\n",
    "    plt.ylabel('nll')\n",
    "    plt.savefig(name + '_nll_val_curve.pdf', bbox_inches='tight')\n",
    "    plt.close()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "umU3VYKzMbDt"
   },
   "source": [
    "**Training step**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "NxkUZ1xVLbm_"
   },
   "outputs": [],
   "source": [
    "def training(name, max_patience, num_epochs, model, optimizer, training_loader, val_loader):\n",
    "    \"\"\"Train `model` with early stopping on the validation NLL.\n",
    "\n",
    "    The best model (lowest validation NLL) is saved to `name + '.model'`.\n",
    "    Returns the per-epoch validation NLL history as a numpy array.\n",
    "    \"\"\"\n",
    "    nll_val = []\n",
    "    best_nll = 1000.\n",
    "    patience = 0\n",
    "\n",
    "    # Main loop\n",
    "    for e in range(num_epochs):\n",
    "        # TRAINING\n",
    "        model.train()\n",
    "        for indx_batch, batch in enumerate(training_loader):\n",
    "            if hasattr(model, 'dequantization'):\n",
    "                if model.dequantization:\n",
    "                    # uniform dequantization of discrete pixel values\n",
    "                    batch = batch + torch.rand(batch.shape)\n",
    "            # call the module, not .forward(), so hooks are respected\n",
    "            loss = model(batch)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            # the graph is rebuilt every iteration, so retain_graph=True is\n",
    "            # unnecessary and only wastes memory\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "        # Validation\n",
    "        loss_val = evaluation(val_loader, model_best=model, epoch=e)\n",
    "        nll_val.append(loss_val)  # save for plotting\n",
    "\n",
    "        if e == 0:\n",
    "            print('saved!')\n",
    "            torch.save(model, name + '.model')\n",
    "            best_nll = loss_val\n",
    "        else:\n",
    "            if loss_val < best_nll:\n",
    "                print('saved!')\n",
    "                torch.save(model, name + '.model')\n",
    "                best_nll = loss_val\n",
    "                patience = 0\n",
    "\n",
    "                samples_generated(name, val_loader, extra_name=\"_epoch_\" + str(e))\n",
    "            else:\n",
    "                patience = patience + 1\n",
    "\n",
    "        if patience > max_patience:\n",
    "            break\n",
    "\n",
    "    nll_val = np.asarray(nll_val)\n",
    "\n",
    "    return nll_val"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "0BXJ9dN0MinB"
   },
   "source": [
    "## Experiments"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "KsF7f-Q-MkWu"
   },
   "source": [
    "**Initialize datasets**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "fqZKMNM0LdQ1"
   },
   "outputs": [],
   "source": [
    "# Dataset splits; the Digits class above takes the first 1000 samples for\n",
    "# 'train' (val/test boundaries are defined inside Digits -- see that cell)\n",
    "train_data = Digits(mode='train')\n",
    "val_data = Digits(mode='val')\n",
    "test_data = Digits(mode='test')\n",
    "\n",
    "# Shuffle only the training loader; evaluation order stays fixed\n",
    "training_loader = DataLoader(train_data, batch_size=64, shuffle=True)\n",
    "val_loader = DataLoader(val_data, batch_size=64, shuffle=False)\n",
    "test_loader = DataLoader(test_data, batch_size=64, shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "6lEKUznpMns7"
   },
   "source": [
    "**Hyperparameters**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ANQo7LrGLjIN"
   },
   "outputs": [],
   "source": [
    "prior_name = 'vampprior'\n",
    "\n",
    "if prior_name in ['standard', 'flow']:\n",
    "    num_components = 1\n",
    "elif prior_name[0:3] == 'gtm':\n",
    "    num_components = 4\n",
    "else:\n",
    "    num_components = 4**2\n",
    "\n",
    "\n",
    "D = 64   # input dimension\n",
    "L = 2 # number of latents\n",
    "M = 256  # the number of neurons in scale (s) and translation (t) nets\n",
    "\n",
    "lr = 1e-3 # learning rate\n",
    "num_epochs = 1000 # max. number of epochs\n",
    "max_patience = 20 # an early stopping is used, if training doesn't improve for longer than 20 epochs, it is stopped"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-7APXeunMrDh"
   },
   "source": [
    "**Creating a folder for results**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "bjSUn1eWLkWm"
   },
   "outputs": [],
   "source": [
    "name = 'vae_' + prior_name + '_' + str(num_components) + '_' + str(L)\n",
    "result_dir = 'results/' + name + '/'\n",
    "# makedirs also creates the missing 'results/' parent directory, and\n",
    "# exist_ok=True makes re-running this cell safe (os.mkdir would raise)\n",
    "os.makedirs(result_dir, exist_ok=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Hpwm6LWUMulQ"
   },
   "source": [
    "**Initializing the model: (i) determining the conditional likelihood distribution, (ii) defining encoder and decoder nets, and a prior**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "FrnNsCqQLmK3",
    "outputId": "5f0cf2b1-0a96-4f5c-da9e-f78f909a5259"
   },
   "outputs": [],
   "source": [
    "# Conditional likelihood p(x|z): categorical over the 17 pixel values\n",
    "# {0, ..., 16}, or Bernoulli with a single parameter per pixel.\n",
    "likelihood_type = 'categorical'\n",
    "\n",
    "if likelihood_type == 'categorical':\n",
    "    num_vals = 17\n",
    "elif likelihood_type == 'bernoulli':\n",
    "    num_vals = 1\n",
    "\n",
    "# First, we initialize the encoder and the decoder\n",
    "# -encoder: D pixels -> 2 * L outputs (presumably mean and log-variance of\n",
    "#  q(z|x) -- confirm against the Encoder class)\n",
    "encoder_net = nn.Sequential(nn.Linear(D, M), nn.LeakyReLU(),\n",
    "                        nn.Linear(M, M), nn.LeakyReLU(),\n",
    "                        nn.Linear(M, 2 * L))\n",
    "\n",
    "encoder = Encoder(encoder_net=encoder_net)\n",
    "\n",
    "# -decoder: latent (L) -> num_vals * D outputs, i.e. num_vals values per pixel\n",
    "decoder_net = nn.Sequential(nn.Linear(L, M), nn.LeakyReLU(),\n",
    "                        nn.Linear(M, M), nn.LeakyReLU(),\n",
    "                        nn.Linear(M, num_vals * D))\n",
    "\n",
    "decoder = Decoder(distribution=likelihood_type, decoder_net=decoder_net, num_vals=num_vals)\n",
    "\n",
    "# Second, we initialize the prior\n",
    "# -GTM: a net maps 2D points in [u_min, u_max] to prior parameters (2 * L outputs)\n",
    "if prior_name == 'gtm':\n",
    "    gtm_net = nn.Sequential(nn.Linear(2, M), nn.Tanh(),\n",
    "                          nn.Linear(M, M), nn.Tanh(),\n",
    "                          nn.Linear(M, 2 * L))\n",
    "\n",
    "    prior = GTMPrior(L=L, gtm_net=gtm_net, num_components=num_components, u_min=-10., u_max=10.)\n",
    "\n",
    "# -VampPrior: shares the encoder with the model\n",
    "elif prior_name == 'vampprior':\n",
    "    prior = VampPrior(L=L, D=D, num_vals=num_vals, encoder=encoder, num_components=num_components)\n",
    "\n",
    "# -VampPrior initialized with the mean of the training data\n",
    "elif prior_name == 'data-vampprior-avg':\n",
    "    prior = VampPrior(L=L, D=D, num_vals=num_vals, encoder=encoder, num_components=num_components, data=torch.from_numpy(np.mean(train_data.data, 0, keepdims=True)))\n",
    "\n",
    "# -VampPrior fed with the first num_components raw training points\n",
    "#  NOTE(review): relies on Digits.__getitem__ accepting a slice -- confirm\n",
    "elif prior_name == 'data-vampprior-raw':\n",
    "    prior = VampPrior(L=L, D=D, num_vals=num_vals, encoder=encoder, num_components=num_components, data=torch.from_numpy(train_data[:num_components]))\n",
    "\n",
    "# -GTM + VampPrior: the net maps 2D points to D-dim outputs squashed to (0, 1)\n",
    "#  by the final Sigmoid\n",
    "elif prior_name == 'gtm-vampprior':\n",
    "    gtm_net_vamp = nn.Sequential(nn.Linear(2, M), nn.Tanh(),\n",
    "                          nn.Linear(M, M), nn.Tanh(),\n",
    "                          nn.Linear(M, D), nn.Sigmoid())\n",
    "    prior = GTMVampPrior(L=L, D=D, gtm_net=gtm_net_vamp, encoder=encoder, num_components=num_components, u_min=-10., u_max=10., num_vals=num_vals)\n",
    "\n",
    "# -mixture-of-Gaussians prior\n",
    "elif prior_name == 'mog':\n",
    "    prior = MoGPrior(L=L, num_components=num_components)\n",
    "\n",
    "# -standard normal prior\n",
    "elif prior_name == 'standard':\n",
    "    prior = StandardPrior(L=L)\n",
    "\n",
    "# -flow-based prior built from coupling-style scale/translation nets\n",
    "#  (each acts on half of the L latent dimensions)\n",
    "elif prior_name == 'flow':\n",
    "    num_flows = 3\n",
    "\n",
    "    # scale (s) network\n",
    "    nets = lambda: nn.Sequential(nn.Linear(L // 2, M), nn.LeakyReLU(),\n",
    "                                nn.Linear(M, M), nn.LeakyReLU(),\n",
    "                                nn.Linear(M, L // 2), nn.Tanh())\n",
    "\n",
    "    # translation (t) network\n",
    "    nett = lambda: nn.Sequential(nn.Linear(L // 2, M), nn.LeakyReLU(),\n",
    "                                nn.Linear(M, M), nn.LeakyReLU(),\n",
    "                                nn.Linear(M, L // 2))\n",
    "\n",
    "    prior = FlowPrior(nets, nett, num_flows=num_flows, D=L)\n",
    "\n",
    "# Eventually, we initialize the full model\n",
    "model = VAE(encoder=encoder, decoder=decoder, prior=prior, num_vals=num_vals, L=L, likelihood_type=likelihood_type)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "3SzTemY3NSxO"
   },
   "source": [
    "**Optimizer - here we use Adamax**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "R9TZtLVtLoWc"
   },
   "outputs": [],
   "source": [
    "# OPTIMIZER\n",
    "# Adamax over the trainable parameters only; use bare truthiness rather than\n",
    "# comparing to True (PEP 8)\n",
    "optimizer = torch.optim.Adamax([p for p in model.parameters() if p.requires_grad], lr=lr)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "dNf__W_ONVHA"
   },
   "source": [
    "**Training loop**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "KhqHgluGLqIC",
    "outputId": "c52fa1e4-3376-4bff-9f87-6f03613c4e42"
   },
   "outputs": [],
   "source": [
    "# Training procedure\n",
    "# Early-stops after `max_patience` epochs without val-NLL improvement and\n",
    "# returns the per-epoch validation NLL history (used for plotting below)\n",
    "nll_val = training(name=result_dir + name, max_patience=max_patience, num_epochs=num_epochs, model=model, optimizer=optimizer,\n",
    "                       training_loader=training_loader, val_loader=val_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-3XTxgEcNXfp"
   },
   "source": [
    "**The final evaluation**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "okK1mV_-LrRU",
    "outputId": "4664693f-742d-4453-94cf-d051d2efa9be"
   },
   "outputs": [],
   "source": [
    "test_loss = evaluation(name=result_dir + name, test_loader=test_loader)\n",
    "# 'with' guarantees the file is closed even if the write raises\n",
    "with open(result_dir + name + '_test_loss.txt', \"w\") as f:\n",
    "    f.write(str(test_loss))\n",
    "\n",
    "samples_real(result_dir + name, test_loader)\n",
    "\n",
    "plot_curve(result_dir + name, nll_val)\n",
    "\n",
    "samples_generated(result_dir + name, test_loader, extra_name='FINAL')"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "collapsed_sections": [],
   "name": "vae_priors.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
