import torch
from torch import nn
import matplotlib.pyplot as plt
from torchvision.transforms import ToTensor
import math
from data import DataSampler, load_cifar10

# Keep the dataset import with the other setup code, ahead of its first use
# (it was previously interleaved between the constant and the loader).
import torchvision.datasets as datasets

# Number of images per optimization step.
batch_size = 64

# CIFAR-10 training images as [0, 1] float tensors (ToTensor), served in
# mini-batches. Downloads the dataset into ./data on first run.
train_sampler = torch.utils.data.DataLoader(
    datasets.CIFAR10('./data', download=True, transform=ToTensor()),
    batch_size=batch_size)

class LeakyReLu:
    """Invertible leaky-ReLU-style activation with a learnable log-slope.

    Positive inputs pass through unchanged; negative inputs are scaled by
    ``exp(s)``, so the map is invertible for any finite ``s``.
    """

    def __call__(self, x, s):
        """Apply the activation: ``x`` where ``x > 0``, else ``x * exp(s)``."""
        scaled_negative = x * s.exp()
        return torch.where(x > 0, x, scaled_negative)

    def inv(self, x, s):
        """Invert the activation: scaling by ``exp(-s)`` undoes ``exp(s)``."""
        return self(x, -s)

    def grad(self, x, s):
        """Pointwise derivative: 1 on ``x >= 0``, ``exp(s)`` on ``x < 0``."""
        on_positive = (x >= 0)
        on_negative = (x < 0)
        return on_positive + on_negative * s.exp()


class Flow(nn.Module):
    """Standard-normal base distribution for the flow stack."""

    def sample(self, n=1, dims=256):
        """Draw ``n`` i.i.d. samples of dimension ``dims`` from N(0, I)."""
        return torch.randn((n, dims))

    def log_prob(self, x):
        """Elementwise standard-normal log-density of ``x`` (same shape as ``x``)."""
        log_normalizer = math.log(math.sqrt(2 * math.pi))
        return -0.5 * x ** 2 - log_normalizer


class MultiFlow(Flow):
    """Mixture of ``units`` single-direction 1-D flows over base ``p``.

    Each component k transforms the base coordinate along direction
    ``d[:, k]`` via ``z -> w_outer * act(w_inner * z + b_inner) + b_outer``;
    the orthogonal remainder is left untransformed. ``alpha`` holds the
    mixture logits.
    """

    def __init__(self, p=Flow(), act=LeakyReLu(), dims=256, units=4):
        super().__init__()
        self.p = p      # base distribution (provides sample / log_prob)
        self.act = act  # invertible elementwise activation
        self.d = nn.Parameter(torch.randn((dims, units)))  # component directions
        self.w_inner = nn.Parameter(torch.randn((units)))
        self.b_inner = nn.Parameter(torch.randn((units)))
        self.w_outer = nn.Parameter(torch.randn((units)))
        self.b_outer = nn.Parameter(torch.randn((units)))
        self.s = nn.Parameter(torch.zeros((units)))        # log-slope for act
        self.alpha = nn.Parameter(torch.randn((units)))    # mixture logits

    def sample(self, n=1):
        """Draw ``n`` samples: pick one component per sample, transform noise."""
        z = self.p.sample(n)
        # Categorical.sample takes a sample *shape*, not an int.
        i = torch.distributions.Categorical(logits=self.alpha).sample((n,))
        # Select per-sample parameters; the trailing None adds a broadcast
        # axis so each (n,) selection scales the (n, dims) noise row-wise.
        # (Fixes the original, which selected w_outer/w_inner for the biases.)
        wo, bo = self.w_outer[i, None], self.b_outer[i, None]
        wi, bi = self.w_inner[i, None], self.b_inner[i, None]
        return wo * self.act(wi * z + bi, self.s[i, None]) + bo

    def log_prob(self, x):
        """Mixture log-density of ``x`` via the change-of-variables formula.

        x: (batch, dims). Returns a (batch,) tensor.
        """
        # Coordinate of x along each direction d (least-squares projection),
        # and the orthogonal remainder v, which the flow leaves untouched.
        u = torch.sum(self.d * x[..., None], -2) / self.d.pow(2).sum(0)
        v = x[..., None] - u[:, None] * self.d
        # Invert the outer affine, the activation, then the inner affine.
        inner = (u - self.b_outer) / self.w_outer
        w = (self.act.inv(inner, self.s) - self.b_inner) / self.w_inner
        # Jacobian of the forward 1-D map for the change-of-variables term.
        J = self.w_outer * self.w_inner * self.act.grad(inner, self.s)
        z = v + w[:, None] * self.d  # reconstructed base point, (batch, dims, units)
        # Sum the elementwise base log-density over the feature axis so each
        # component contributes one (batch, units) log-likelihood; the
        # original omitted the sum, which cannot broadcast with the J term.
        log_prob = (nn.functional.log_softmax(self.alpha, -1)
                    + self.p.log_prob(z).sum(-2)
                    - J.abs().log())
        # logsumexp over components: numerically stable mixture density
        # (exp() of high-dimensional log-densities underflows to zero).
        return torch.logsumexp(log_prob, -1)


flow = MultiFlow()
# To stack a second flow on top of the first: flow = MultiFlow(flow)

optim = torch.optim.SGD(flow.parameters(), 0.01, momentum=0.9, weight_decay=1e-4)

# Maximum-likelihood training: minimize the negative log-density of the data.
# Labels are not needed for density estimation, so they are discarded.
for X_batch, _ in train_sampler:
    # Grayscale (mean over the channel axis), downsample 2x in each spatial
    # dimension, then flatten — presumably 32x32 CIFAR images become
    # 16*16 = 256 features, matching the flow's default dims=256.
    X_batch = X_batch.mean(1)[:, ::2, ::2].flatten(1)
    loss = -flow.log_prob(X_batch).mean()
    optim.zero_grad()
    loss.backward()
    optim.step()
    print(loss.item())
