import torch
from torch import nn
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import math
import numpy as np

from torchvision.datasets import MNIST
from torchvision import transforms

# MNIST training split; ToTensor() yields float images in [0, 1] of shape (1, 28, 28).
# NOTE(review): download=False assumes the raw files already exist under ./ —
# a fresh checkout will raise RuntimeError; confirm or flip to download=True.
mnist_train = MNIST('./', download=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                    ]), train=True)

# Mini-batches of 32; no shuffle argument, so samples come in dataset order.
dataloader = DataLoader(mnist_train, batch_size=32)

class Bernoulli(torch.distributions.Bernoulli):
    """Bernoulli whose log_prob is the negated BCE-with-logits of its logits.

    NOTE(review): binary_cross_entropy_with_logits defaults to
    reduction='mean', so this returns a 0-dim scalar (the mean over every
    element) rather than the parent class's element-wise log-probabilities;
    the caller's subsequent .sum(-1) is therefore a no-op. Switching to
    reduction='none' is the proper fix but requires adjusting the caller's
    shape handling at the same time.
    """

    def log_prob(self, x):
        bce = nn.functional.binary_cross_entropy_with_logits(self.logits, x)
        return bce.neg()

class Normal(torch.distributions.Normal):
    """Normal distribution with a closed-form KL to the standard normal."""

    def kl_prior(self):
        # Element-wise KL( N(loc, scale) || N(0, 1) )
        #   = 0.5 * (scale^2 + loc^2 - 1) - log(scale)
        variance = self.scale.pow(2)
        return 0.5 * (variance + self.loc.pow(2) - 1) - torch.log(self.scale)


class Flow(nn.Module):
    """Base density: a standard normal over `dims` independent coordinates."""

    def __init__(self, dims=1):
        super().__init__()
        self.d = dims  # dimensionality of the sample space

    def sample(self, n=1):
        """Draw `n` i.i.d. samples from N(0, I), shape (n, d)."""
        return Normal(0, 1).sample((n, self.d))

    def log_prob(self, x):
        """Log-density of x under N(0, I): per-coordinate terms summed over the last axis."""
        return Normal(0, 1).log_prob(x).sum(dim=-1)

class VAE(Flow):
    """Single-latent-layer VAE over `dims`-dimensional observations.

    The encoder `q` maps x to the mean and log-std of a diagonal Gaussian
    q(z|x); the decoder `p` maps z to per-pixel Bernoulli logits.
    `log_prob` returns the per-sample ELBO, a lower bound on log p(x).
    """

    def __init__(self, p: Flow, dims=1, units=4):
        # Pass dims through so the Flow base is initialised consistently
        # (originally super().__init__() left the base at dims=1 until
        # self.d was overwritten below).
        super().__init__(dims=dims)
        self.prev = p   # prior over latents; the closed-form KL below assumes N(0, I)
        self.u = units  # latent dimensionality
        self.d = dims   # observation dimensionality
        # Encoder q(z|x): emits `units` means followed by `units` log-stds.
        self.q = nn.Sequential(
            nn.Linear(dims, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 2 * units),
        )
        # Decoder p(x|z): emits one Bernoulli logit per observed dimension.
        self.p = nn.Sequential(
            nn.Linear(units, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, dims),
        )

    def sample(self, n=1):
        """Draw n model samples: z ~ prior, then the decoder mean sigmoid(logits)."""
        z = self.prev.sample(n)
        logits = self.p(z)
        return logits.sigmoid()

    def log_prob(self, x):
        """Per-sample ELBO of x, shape (batch,).

        Fixes vs. the original:
        - rsample() instead of sample(): sample() detaches z, so the
          encoder received no reparameterization gradient from the
          reconstruction term.
        - The Bernoulli reconstruction term is computed element-wise
          (reduction='none') and summed per sample; the subclassed
          Bernoulli.log_prob collapsed it to a single scalar mean, making
          the later .sum(-1) a no-op.
        - The KL term is summed over latent dimensions so both ELBO terms
          have shape (batch,); previously a scalar was broadcast against
          a (batch, units) tensor.
        """
        # Keep targets strictly inside (0, 1) for the BCE target.
        x = x.clamp(max=0.99, min=0.01)
        phi = self.q(x)
        q_dist = Normal(phi[..., :self.u], phi[..., self.u:].exp())
        z = q_dist.rsample()
        logits = self.p(z)
        # log p(x|z): element-wise Bernoulli log-likelihood, summed over pixels.
        recon = -nn.functional.binary_cross_entropy_with_logits(
            logits, x, reduction='none'
        ).sum(-1)
        # KL(q(z|x) || N(0, I)) in closed form, summed over latent dims.
        return recon - q_dist.kl_prior().sum(-1)


# Model: a 20-dim standard-normal base wrapped by a VAE whose observation
# space is the 784-pixel flattened MNIST image (latent size 20).
flow = Flow(dims=20)
flow = VAE(flow, dims=784, units=20)
optim = torch.optim.SGD(flow.parameters(), 1e-2, momentum=0.9, weight_decay=1e-4)
# optim = torch.optim.Adam(flow.parameters(), 1e-3, weight_decay=1e-4)

# Interactive mode so imshow/pause below refresh the same figure while training.
plt.ion()

# Single pass over the training set; the loss is the negative mean of the
# per-sample objective returned by VAE.log_prob.
for i, (X, y) in enumerate(dataloader):
    loss = -flow.log_prob(X.flatten(1)).mean()
    optim.zero_grad()
    loss.backward()
    optim.step()
    print(loss.item())
    # Every 100 batches (including batch 0), draw one model sample and show it.
    if i % 100 == 0:
        with torch.no_grad():
            img = flow.sample()
            plt.imshow(img.reshape(28, 28))
            plt.pause(0.01)
