import math
import torch
from torch import nn
import argparse

# Data
from varflow.data.loaders.image import CIFAR10

# Model
from varflow.flows import Flow, FlowLayer
from varflow.sampler import Rejection, Logits
from varflow.transforms import VAE, AffineCouplingBijection, ActNormBijection, Reverse, UniformDequantization
from varflow.distributions import StandardNormal, ConditionalNormal, ConditionalBernoulliProb
from varflow.nn.nets import MLP
from varflow.nn.layers import ElementwiseParams
from varflow.optim.schedulers import LinearWarmupScheduler

# Optim
from torch.optim import Adam
from varflow.utils import iwbo_nats, loglik_bpd, sum_except_batch

# Plot
import torchvision.utils as vutils

############
## Device ##
############

# Prefer GPU when available; the model is moved here (L133) and batches follow via .to(device).
device = 'cuda' if torch.cuda.is_available() else 'cpu'

##########
## Data ##
##########

# CIFAR-10 via the project loader; 128 is the batch size — presumably applied
# to both the train and test loaders (confirm against CIFAR10.get_data_loaders).
data = CIFAR10()
train_loader, test_loader = data.get_data_loaders(128)

###########
## Model ##
###########

# Channel count of the per-stage latent code (used by the conv nets below and,
# via the global, by SRVAE.sample).
latent_size = 64
# Channel count of the intermediate image representation passed between stages
# (3 — presumably matching CIFAR-10's RGB channels; confirm).
img_repr_dims = 3

class FlowVAE(FlowLayer):
    """One VAE level expressed as a flow layer.

    `encoder` is the amortized posterior q(z|x), `decoder` the likelihood
    p(x|z).  `self.base_dist` (the prior over z) is not set here — it is
    presumably attached when the instance is applied to an inner model via
    `FlowVAE(...)(model)`; confirm against FlowLayer.
    """

    def __init__(self, decoder, encoder):
        super().__init__()
        self.decoder = decoder
        self.encoder = encoder

    def log_prob(self, x):
        # Single-sample ELBO: log p(z) + log p(x|z) - log q(z|x).
        latent, log_q = self.encoder.sample_with_log_prob(context=x)
        log_prior = self.base_dist.log_prob(latent)
        log_likelihood = self.decoder.log_prob(x, context=latent)
        return log_prior + log_likelihood - log_q

    def sample(self, num_samples):
        # Ancestral sampling: draw z from the prior, then decode.
        latent = self.base_dist.sample(num_samples)
        return self.decoder.sample(context=latent)

class SRVAE(FlowVAE):
    """Super-resolution VAE stage: models an image as a 2x-downsampled base
    image (handled by the inner model via `base_dist`) plus a decoded residual.

    NOTE(review): the `latent_size` constructor argument is never stored or
    used — `sample()` reads the module-level `latent_size` global instead,
    while the only caller (`get_sr`) passes `img_repr_dims` here. Confirm
    which is intended before relying on the parameter.
    """

    def __init__(self, latent_size, decoder, encoder):
        super().__init__(decoder, encoder)
        # 2x spatial down/up-sampling used to split x into (low-res, residual).
        self.pool = nn.AvgPool2d(2, 2)
        self.up = nn.UpsamplingNearest2d(scale_factor=2)

    def log_prob(self, x):
        x0 = self.pool(x)   # low-resolution version of x
        ux0 = self.up(x0)   # upsampled back to x's resolution
        # q(z | x, up(x0)): encoder conditions on the image and its blurred upsample.
        z, log_qz = self.encoder.sample_with_log_prob(context=torch.cat([x, ux0], dim=1))
        # Decoder models the residual x - up(x0), conditioned on (z, up(x0)).
        log_px = self.decoder.log_prob(x - ux0, torch.cat([z, ux0], dim=1))
        # Standard-normal prior over z, shaped per-sample from the drawn z.
        z_dist = StandardNormal(z.shape[1:])
        # log p(x0) under the inner model + log p(z) + log p(x|z, x0) - log q(z|...).
        return self.base_dist.log_prob(x0) + z_dist.log_prob(z) + log_px - log_qz

    def sample(self, num_samples):
        # Sample the low-res image from the inner model, then add a decoded residual.
        x0 = self.base_dist.sample(num_samples)
        ux0 = self.up(x0)
        # NOTE(review): uses the *global* `latent_size` (64), not the ctor argument.
        z_dist = StandardNormal((latent_size, *ux0.shape[2:])).to(ux0)
        z = z_dist.sample(num_samples)
        return ux0 + self.decoder.sample(context=torch.cat([z, ux0], dim=1))


# Innermost model: a standard-normal prior over a (latent_size, 1, 1) code,
# wrapped by a small conv VAE that maps a 2x2 image representation to/from it.
model = StandardNormal((latent_size, 1, 1))

_base_encoder = ConditionalNormal(nn.Sequential(
    nn.Conv2d(img_repr_dims, 256, 2),
    nn.ReLU(),
    nn.Conv2d(256, latent_size * 2, 1),
), 1)
_base_decoder = ConditionalNormal(nn.Sequential(
    nn.Conv2d(latent_size, 256, 1),
    nn.ReLU(),
    nn.ConvTranspose2d(256, img_repr_dims * 2, 2),
), 1)
model = FlowVAE(encoder=_base_encoder, decoder=_base_decoder)(model)


def get_sr():
    """Build one super-resolution VAE stage.

    The encoder sees the image concatenated with its blurred upsample
    (2 * img_repr_dims channels) and emits mean/log-scale for the latent
    (2 * latent_size channels); the decoder maps (z, upsampled base) to
    mean/log-scale of the residual.
    """
    enc_net = nn.Sequential(
        nn.Conv2d(img_repr_dims * 2, 256, 3, 1, 1),
        nn.ReLU(),
        nn.Conv2d(256, 512, 3, 1, 1),
        nn.ReLU(),
        nn.Conv2d(512, latent_size * 2, 3, 1, 1),
    )
    dec_net = nn.Sequential(
        nn.Conv2d(latent_size + img_repr_dims, 256, 3, 1, 1),
        nn.ReLU(),
        nn.Conv2d(256, 512, 3, 1, 1),
        nn.ReLU(),
        nn.Conv2d(512, img_repr_dims * 2, 3, 1, 1),
    )
    return SRVAE(
        img_repr_dims,
        encoder=ConditionalNormal(enc_net, 1),
        decoder=ConditionalNormal(dec_net, 1),
    )


# Stack four super-resolution stages, each doubling spatial resolution.
for _ in range(4):
    model = get_sr()(model)

# Disabled final pixel-level VAE stage (kept for reference):
# model = FlowVAE(
#     encoder=ConditionalNormal(nn.Sequential(
#         nn.Conv2d(3, 256, 3, 1, 1),
#         nn.ReLU(),
#         nn.Conv2d(256, img_repr_dims*2, 3, 1, 1),
#     ), 1),
#     decoder=ConditionalBernoulliProb(nn.Sequential(
#         nn.Conv2d(img_repr_dims, 256, 3, 1, 1),
#         nn.ReLU(),
#         nn.Conv2d(256, 3, 3, 1, 1),
#     )),
# )(model)

# Outermost layer: dequantize 8-bit integer pixels to continuous values.
model = Flow([UniformDequantization(8)])(model)
model = model.to(device)

###########
## Optim ##
###########


# Adam with a linear LR warmup.  NOTE(review): despite the kwarg name
# `total_epoch`, the scheduler is stepped once per *iteration* in the
# training loop, so warmup spans 1000 iterations — confirm intent.
optimizer = Adam(model.parameters(), lr=1e-3)
scheduler_iter = LinearWarmupScheduler(optimizer, total_epoch=1000)

###########
## Train ##
###########

print('Training...')
# Single source of truth for the epoch count: the original loop ran
# range(100) but the progress line hardcoded "10" as the total.
num_epochs = 100
for epoch in range(num_epochs):
    running_bpd = 0.0
    for i, x in enumerate(train_loader):
        optimizer.zero_grad()
        # Negative log-likelihood normalized to bits per dimension for this batch.
        loss = -model.log_prob(x.to(device)).sum() / (math.log(2) * x.numel())
        loss.backward()
        optimizer.step()
        scheduler_iter.step()  # warmup scheduler is stepped per iteration
        running_bpd += loss.detach().cpu().item()
        print('Epoch: {}/{}, Iter: {}/{}, Bits/dim: {:.3f}'.format(
            epoch+1, num_epochs, i+1, len(train_loader), running_bpd/(i+1)), end='\r')
    print('')
    # Save a grid of samples after each epoch; pixels rescaled from [0, 255].
    with torch.no_grad():
        samples = model.sample(64) / 255.
        vutils.save_image(samples.cpu().float(), fp=f'mnist_mvae_e{epoch}.png', nrow=8)

##########
## Test ##
##########

print('Testing...')
# Evaluate held-out bits/dim; no gradients needed.
with torch.no_grad():
    l = 0.0
    for batch_idx, batch in enumerate(test_loader):
        bpd = -model.log_prob(batch.to(device)).sum() / (math.log(2) * batch.numel())
        l += bpd.detach().cpu().item()
        print('Iter: {}/{}, Bits/dim: {:.3f}'.format(batch_idx+1, len(test_loader), l/(batch_idx+1)), end='\r')
    print('')

############
## Sample ##
############

print('Sampling...')
# Save one grid of real test images and one grid of model samples, both
# rescaled from [0, 255] to [0, 1] for torchvision.
# NOTE(review): the output filenames say "mnist" but the data is CIFAR-10.
real_images = next(iter(test_loader))[:64] / 255.
generated = model.sample(64) / 255.

vutils.save_image(real_images.cpu().float(), fp='mnist_data.png', nrow=8)
vutils.save_image(generated.cpu().float(), fp='mnist_mvae.png', nrow=8)
