import torch
from torch import nn
import matplotlib.pyplot as plt
import math
import numpy as np

from sklearn import datasets


# Training set: 1000 points from two interleaved half-moons with a little
# Gaussian noise, converted to a float32 torch tensor of shape (1000, 2).
moons, _labels = datasets.make_moons(n_samples=1000, noise=0.05)
data = torch.from_numpy(moons.astype(np.float32))

class LeakyReLu:
    """Invertible leaky-ReLU-like activation with a log-parameterized slope.

    The negative half-line is scaled by ``exp(s)`` (always positive), so the
    map is a bijection for any real ``s`` and its inverse is the same map
    with ``-s``.
    """

    def __call__(self, x, s):
        # Non-positive entries get scaled by exp(s); positive ones pass through.
        scaled = x * s.exp()
        return torch.where(x > 0, x, scaled)

    def inv(self, x, s):
        """Exact inverse: scaling by exp(-s) undoes exp(s); signs are preserved,
        so the same branch selection applies on the way back."""
        return self(x, -s)

    def grad(self, x, s):
        """Elementwise derivative: 1 on x >= 0, exp(s) on x < 0."""
        return torch.where(x < 0, s.exp().expand_as(x), torch.ones_like(x))


class Flow(nn.Module):
    """Base distribution for the stack: an isotropic standard normal N(0, I)
    over ``dims`` dimensions."""

    def __init__(self, dims=1):
        super().__init__()
        # Dimensionality of the event space.
        self.d = dims

    def sample(self, n=1):
        """Draw ``n`` independent samples; returns a tensor of shape (n, d)."""
        standard_normal = torch.distributions.Normal(0, 1)
        return standard_normal.sample((n, self.d))

    def log_prob(self, x):
        """Log-density of ``x`` under N(0, I), summed over the last axis."""
        standard_normal = torch.distributions.Normal(0, 1)
        return standard_normal.log_prob(x).sum(-1)


class VAE(Flow):
    """One stochastic layer of a stacked VAE.

    The encoder ``q`` maps an observation x (``dims``-dimensional) to the
    mean and log-std of a diagonal Gaussian over a latent z
    (``units``-dimensional); the decoder ``p`` maps z back to the mean and
    log-std of a diagonal Gaussian over x.  ``prev`` supplies the prior over
    z (another Flow/VAE, allowing layers to be stacked).
    """

    def __init__(self, p: Flow, dims=1, units=4):
        # Pass dims through to the base class instead of silently using its
        # default; self.d would mask the mistake but the intent is clearer.
        super().__init__(dims)
        self.prev = p
        self.u = units
        self.d = dims
        # Encoder: x -> (mean, log-std) of q(z|x), concatenated along the
        # last axis.
        self.q = nn.Sequential(
            nn.Linear(dims, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 2 * units),
        )
        # Decoder: z -> (mean, log-std) of p(x|z).
        self.p = nn.Sequential(
            nn.Linear(units, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 2 * dims),
        )

    def sample(self, n=1):
        """Ancestral sampling: draw z from the prior, then x ~ p(x|z)."""
        z = self.prev.sample(n)
        theta = self.p(z)
        p_dist = torch.distributions.Normal(
            theta[..., :self.d], theta[..., self.d:].exp())
        return p_dist.sample()

    def log_prob(self, x):
        """Single-sample ELBO, a stochastic lower bound on log p(x):

            log p(z) + log p(x|z) - log q(z|x),  z ~ q(z|x)

        z is drawn with rsample() so the reparameterization trick lets
        gradients flow back into the encoder; plain sample() would detach z
        and leave q (and the prior below it) untrained.
        """
        phi = self.q(x)
        q_dist = torch.distributions.Normal(
            phi[..., :self.u], phi[..., self.u:].exp())
        z = q_dist.rsample()
        theta = self.p(z)
        p_dist = torch.distributions.Normal(
            theta[..., :self.d], theta[..., self.d:].exp())
        return self.prev.log_prob(z) + p_dist.log_prob(x).sum(-1) - q_dist.log_prob(z).sum(-1)


# Stack two stochastic layers on a 4-d standard-normal base:
# 2-d data <- 4-d latent <- 4-d Gaussian prior.
flow = Flow(dims=4)
flow = VAE(flow, dims=4)
flow = VAE(flow, dims=2)
optim = torch.optim.SGD(flow.parameters(), 1e-4, momentum=0.9, weight_decay=1e-4)
# optim = torch.optim.Adam(flow.parameters(), 1e-3)

# data = data.to('cuda')
# flow = flow.to('cuda')

plt.ion()
for step in range(10000):
    # Minimize the negative single-sample ELBO over the whole dataset.
    loss = -flow.log_prob(data[:, None]).mean()
    optim.zero_grad()
    loss.backward()
    optim.step()
    print(loss.item())

    if step % 1000 == 0:
        with torch.no_grad():
            # Draw ONE batch of model samples and plot its two coordinates.
            # (Sampling twice would pair x-coordinates from one draw with
            # y-coordinates from an independent draw, plotting points the
            # model never generated.)
            samples = flow.sample(1000).numpy()
            plt.scatter(data.numpy()[..., 0], data.numpy()[..., 1],
                        alpha=0.5, label='data')
            plt.scatter(samples[..., 0], samples[..., 1],
                        alpha=0.5, label='model')
            # Legend must be drawn while the labeled scatters exist, i.e.
            # before clf() wipes the axes.
            plt.legend()
            plt.pause(0.1)
            plt.clf()

plt.show()
