import os
import math
import torch
from tqdm import tqdm

# Data
from varflow.data.loaders.image import CelebA, CIFAR10

# Model
import torch.nn as nn
from varflow.flows import Flow
from varflow.distributions import StandardNormal, StandardUniform, ConditionalNormal, ConditionalNormalLoc, ConditionalBernoulliProb, ConditionalBernoulli
from varflow.transforms import AffineCouplingBijection, ActNormBijection2d, Conv1x1
from varflow.transforms import UniformDequantization, Augment, Squeeze2d, Slice, VAE
from varflow.nn.layers import ElementwiseParams2d
from varflow.nn.nets import MLP, DenseNet
from varflow.optim.schedulers import LinearWarmupScheduler
from varflow.distributions import DataParallelDistribution

# Optim
from torch.optim import Adam

# Plot
import torchvision.utils as vutils

############
## Device ##
############

# Train on GPU when one is present; everything is moved via .to(device).
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

###########
## Model ##
###########

# Size of the VAE latent code (channels of a 1x1 spatial feature map).
latent_size = 128

class Encoder(ConditionalNormal):
    """Convolutional encoder q(z|x).

    Five stride-2 conv blocks downsample the image, then a final kernel-2
    conv maps to 2*latent_dim channels, which are handed to ConditionalNormal
    (presumably a loc/log-scale split; the trailing `1` looks like the split
    dim — confirm against varflow). Assumes inputs sized so that the strided
    stack leaves a 2x2 map before the final conv — TODO confirm (64x64 CelebA).
    """

    def __init__(self, in_channels=3, latent_dim=latent_size, hidden_dims=None):
        # FIX: avoid a mutable default argument (one list shared by all calls).
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        modules = []
        for h_dim in hidden_dims:
            # Conv(3x3, stride 2, pad 1): halves the spatial resolution.
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, h_dim, 3, 2, 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        # Collapse the remaining map to 1x1 with 2*latent_dim channels.
        modules.append(nn.Conv2d(in_channels, latent_dim*2, 2))
        super().__init__(nn.Sequential(*modules), 1)

class Decoder(ConditionalNormalLoc):
    """Convolutional decoder p(x|z).

    Transposed convs upsample the (latent_dim, 1, 1) code back to image
    resolution, ending in a conv with 2*3 output channels handed to
    ConditionalNormalLoc (presumably per-pixel location parameters; the
    trailing `1` looks like the split dim — confirm against varflow).
    """

    def __init__(self, latent_dim=latent_size, hidden_dims=None):
        # BUG FIX: the original used hidden_dims=[32,...,512] as a mutable
        # default and called .reverse() on it in place, so every Decoder()
        # instantiation flipped the shared default list's order. Take a copy.
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        else:
            hidden_dims = list(hidden_dims)
        hidden_dims.reverse()
        # Project the 1x1 latent up to a 2x2 map at the widest channel count.
        modules = [nn.ConvTranspose2d(latent_dim, hidden_dims[0], 2)]
        for i in range(len(hidden_dims) - 1):
            # Each block doubles spatial resolution (stride 2, output_padding 1).
            modules.extend([
                nn.ConvTranspose2d(hidden_dims[i],
                                   hidden_dims[i + 1],
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1),
                nn.BatchNorm2d(hidden_dims[i + 1]),
                nn.LeakyReLU()
            ])

        modules.extend([
            # One more 2x upsample, then a plain conv to 2*3 output channels.
            nn.ConvTranspose2d(hidden_dims[-1], hidden_dims[-1], 3, 2, 1, 1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], 2*3, 3, 1, 1),
        ])
        super().__init__(nn.Sequential(*modules), 1)


# Inner model: standard-normal base over a (latent_size, 1, 1) latent with a
# single VAE transform bridging images and the latent code.
model = Flow(base_dist=StandardNormal((latent_size,1,1)),
             transforms=[VAE(Decoder(), Encoder())])

# NOTE(review): this passes the transform list where the call above passes
# base_dist, then calls the resulting Flow on the model — presumably a
# composition API that prepends UniformDequantization; confirm against
# varflow's Flow implementation.
model = Flow([UniformDequantization()])(model)

model = model.to(device)

##########
## Data ##
##########

# CelebA splits; batches appear to be uint8 in [0, 255] (they are divided by
# 255 before saving below) — TODO confirm against varflow's CelebA loader.
data = CelebA()
train_loader, val_loader, test_loader = data.get_data_loaders(128)  # batch size 128

###########
## Optim ##
###########

# Adam with weight decay; the learning rate is warmed up linearly over the
# first 1000 optimizer steps (scheduler.step() is called once per batch).
optimizer = Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
scheduler = LinearWarmupScheduler(optimizer, 1000)

###########
## Train ##
###########

log_dir = 'celeba_vae'
os.makedirs(log_dir, exist_ok=True)

# import wandb
# wandb.init(project='ganvaeflow', entity='yuangzh')
from tensorboardX import SummaryWriter
writer = SummaryWriter(flush_secs=30)

print('Training...')
# BUG FIX: the progress bar previously displayed a hard-coded "/100" while
# the loop actually ran 200 epochs; use a single constant for both.
num_epochs = 200
g_iter = 0
for epoch in range(num_epochs):
    running_bpd = 0.0
    pbar = tqdm(train_loader)
    for i, x in enumerate(pbar):
        optimizer.zero_grad()
        # Negative log-likelihood converted to bits per dimension.
        loss = -model.log_prob(x.to(device)).sum() / (math.log(2) * x.numel())
        loss.backward()
        optimizer.step()
        scheduler.step()  # LR warmup is advanced per batch, not per epoch
        batch_bpd = loss.item()
        running_bpd += batch_bpd
        pbar.set_description_str(f'Epoch: {epoch+1}/{num_epochs}, Bits/dim: {running_bpd / (i + 1):.3f}')
        # Log the extracted float rather than the live tensor.
        writer.add_scalar('Train/loss', batch_bpd, g_iter)
        g_iter += 1
    # Per-epoch checkpoint.
    torch.save(model.state_dict(), f"{log_dir}/model_e{epoch:04d}.pt")
    with torch.no_grad():
        model.eval()
        fp = f'{log_dir}/celeba_vae_e{epoch}.png'
        torch.manual_seed(0)  # fixed seed so per-epoch sample grids are comparable
        # NOTE(review): assumes model.sample returns values in [0, 255] —
        # confirm against varflow's Flow.sample.
        samples = model.sample(64).cpu().float()/255
        vutils.save_image(samples, fp=fp, nrow=8)
        # wandb.log({"bpd": l / len(train_loader), "samples": wandb.Image(fp)})
        writer.add_scalar('Train/bpd', running_bpd / len(train_loader), g_iter)
        writer.add_images("samples", samples, g_iter)
        model.train()

torch.save(model.state_dict(), f"{log_dir}/last.pt")

##########
## Test ##
##########

print('Testing...')
model.eval()
with torch.no_grad():
    running_bpd = 0.0
    pbar = tqdm(test_loader)
    for i, x in enumerate(pbar):
        # Same bits/dim objective as during training.
        loss = -model.log_prob(x.to(device)).sum() / (math.log(2) * x.numel())
        # .detach().cpu() was redundant here: we are inside no_grad and
        # .item() already moves the scalar to host.
        running_bpd += loss.item()
        pbar.set_description_str(f'Bits/dim: {running_bpd / (i + 1):.3f}')
# FIX (naming): this value is bits/dim (divided by log 2 above), not nats.
final_test_bpd = running_bpd / len(test_loader)

# Save final test bits/dim.
with open('celeba_bpd.txt', 'w') as f:
    f.write(str(final_test_bpd))

############
## Sample ##
############

print('Sampling...')
# One batch of real images for a side-by-side comparison grid.
real_batch = next(iter(test_loader))[:64]
with torch.no_grad():
    generated = model.sample(64)

# Both grids are scaled from [0, 255] to [0, 1] before saving.
vutils.save_image(real_batch.cpu().float()/255, fp='celeba_data.png', nrow=8)
vutils.save_image(generated.cpu().float()/255, fp='celeba_vae.png', nrow=8)
