import math
import torch

# Data
from varflow.data.loaders.image import MNIST

# Model
import torch.nn as nn
from varflow.flows import Flow
from varflow.distributions import StandardNormal, StandardUniform
from varflow.transforms import AffineCouplingBijection, ActNormBijection2d, Conv1x1
from varflow.transforms import UniformDequantization, Augment, Squeeze2d, Slice
from varflow.nn.layers import ElementwiseParams2d
from varflow.nn.nets import DenseNet

# Optim
from torch.optim import Adam

# Plot
import torchvision.utils as vutils

############
## Device ##
############

# Run on GPU when one is available; everything below moves tensors via `.to(device)`.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

##########
## Data ##
##########

# MNIST wrapper from the project library; loaders yield image batches.
batch_size = 16
data = MNIST()
train_loader, test_loader = data.get_data_loaders(batch_size)

###########
## Model ##
###########

def net(channels):
    """Parameter network for an affine coupling layer.

    Maps the `channels // 2` conditioning channels through a DenseNet and
    expands to `channels` outputs, which ElementwiseParams2d(2) reshapes
    into the two elementwise parameter maps (shift/scale) the coupling needs.
    """
    backbone = DenseNet(in_channels=channels // 2,
                        out_channels=channels,
                        num_blocks=1,
                        mid_channels=64,
                        depth=8,
                        growth=16,
                        dropout=0.0,
                        gated_conv=True,
                        zero_init=True)
    return nn.Sequential(backbone, ElementwiseParams2d(2))

def _flow_step(channels):
    """One flow step: affine coupling + activation norm + invertible 1x1 conv."""
    return [AffineCouplingBijection(net(channels)),
            ActNormBijection2d(channels),
            Conv1x1(channels)]

# Assemble the multi-scale transform list: dequantize, augment with one
# uniform channel, then three resolution levels of four flow steps each,
# separated by squeeze (space-to-channels) + slice (factor-out) transforms.
_transforms = [UniformDequantization(num_bits=8),
               Augment(StandardUniform((1,28,28)), x_size=1)]
for _ in range(4):
    _transforms += _flow_step(2)
_transforms += [Squeeze2d(), Slice(StandardNormal((4,14,14)), num_keep=4)]
for _ in range(4):
    _transforms += _flow_step(4)
_transforms += [Squeeze2d(), Slice(StandardNormal((16,7,7)), num_keep=8)]
for _ in range(4):
    _transforms += _flow_step(8)

# Base distribution matches the (16,7,7) latent shape left after two squeezes.
model = Flow(base_dist=StandardNormal((16,7,7)),
             transforms=_transforms).to(device)

###########
## Optim ##
###########

optimizer = Adam(model.parameters(), lr=1e-3)

###########
## Train ##
###########

# Single source of truth for the epoch count: the original hard-coded `10`
# both in range() and in the progress string's format args, which would
# silently desynchronize if one were edited without the other.
num_epochs = 10

print('Training...')
for epoch in range(num_epochs):
    running_bpd = 0.0  # running sum of per-batch bits/dim for the epoch average
    for i, x in enumerate(train_loader):
        optimizer.zero_grad()
        # Negative log-likelihood in bits per dimension: sum over the batch,
        # divide by ln(2) * total number of elements in the batch.
        loss = -model.log_prob(x.to(device)).sum() / (math.log(2) * x.numel())
        loss.backward()
        optimizer.step()
        # .item() already synchronizes and detaches to a Python float,
        # so the former .detach().cpu().item() chain is unnecessary.
        running_bpd += loss.item()
        print('Epoch: {}/{}, Iter: {}/{}, Bits/dim: {:.3f}'.format(
            epoch+1, num_epochs, i+1, len(train_loader), running_bpd/(i+1)), end='\r')
    print('')

##########
## Test ##
##########

print('Testing...')
# Evaluate held-out bits/dim; no gradients are needed here.
with torch.no_grad():
    total_bpd = 0.0
    for batch_idx, batch in enumerate(test_loader):
        # Same bits/dim objective as training: NLL / (ln 2 * element count).
        bpd = -model.log_prob(batch.to(device)).sum() / (math.log(2) * batch.numel())
        total_bpd += bpd.item()
        print('Iter: {}/{}, Bits/dim: {:.3f}'.format(
            batch_idx + 1, len(test_loader), total_bpd / (batch_idx + 1)), end='\r')
    print('')

############
## Sample ##
############

print('Sampling...')
# Real test digits for visual comparison.
# NOTE(review): the permute assumes data.test.data is a (N, H, W, C) numpy
# array — TODO confirm against the varflow MNIST loader.
real_images = torch.from_numpy(data.test.data[:64]).permute(0, 3, 1, 2)
flow_samples = model.sample(64)

# Pixel values are in [0, 255]; rescale to [0, 1] for save_image.
vutils.save_image(real_images.cpu().float() / 255, fp='mnist_data.png', nrow=8)
vutils.save_image(flow_samples.cpu().float() / 255, fp='mnist_aug_flow.png', nrow=8)
