import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torch.optim
from DiffOGMP.models.VAEbase import BaseVAE
from DiffOGMP.modules.distributions.distributions import DiagonalGaussianDistribution

class ResBlock(nn.Module):
    """Pre-activation residual block: ReLU -> 3x3 conv -> BN -> ReLU -> 3x3 conv -> BN.

    When ``in_channels`` differs from ``num_hiddens`` the identity path is
    projected through a 3x3 convolution before the addition; otherwise the
    raw input is added to the residual branch.
    """

    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = num_hiddens
        # Residual branch; convolutions are bias-free since BatchNorm follows.
        self._block = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(in_channels, num_residual_hiddens,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(num_residual_hiddens),
            nn.ReLU(),
            nn.Conv2d(num_residual_hiddens, num_hiddens,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(num_hiddens),
        )
        if in_channels != num_hiddens:
            # Channel-matching projection for the shortcut path.
            self.conv_shortcut = nn.Conv2d(in_channels, num_hiddens,
                                           kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        residual = self._block(x)
        if self.in_channels == self.out_channels:
            return x + residual
        return self.conv_shortcut(x) + residual

class ResidualStack(nn.Module):
    """A sequence of ``ResBlock``s followed by a final ReLU.

    Channel layout: the first block maps ``in_channels`` to
    ``num_residual_hiddens``; every subsequent block outputs ``num_hiddens``.

    Bug fix: the original hard-wired ``num_residual_hiddens`` as the input
    channel count of every block after the first, but the second block
    already outputs ``num_hiddens`` — so any stack with
    ``num_residual_layers > 2`` crashed at runtime whenever
    ``num_hiddens != num_residual_hiddens``. Each block's input now tracks
    the previous block's output. Behavior (and state_dict keys) are
    unchanged for stacks of one or two layers.
    """

    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super().__init__()
        self._num_residual_layers = num_residual_layers
        blocks = []
        prev_channels = in_channels
        for i in range(num_residual_layers):
            out_channels = num_residual_hiddens if i == 0 else num_hiddens
            blocks.append(ResBlock(prev_channels, out_channels, num_residual_hiddens))
            prev_channels = out_channels
        self._layers = nn.ModuleList(blocks)

    def forward(self, x):
        for layer in self._layers:
            x = layer(x)
        return F.relu(x)

class Encoder(nn.Module):
    """Two stride-2 downsampling stages followed by a residual stack.

    Overall spatial resolution is reduced by 4x. The residual stack emits
    ``z_channels * 2`` channels — presumably the mean/log-variance moments
    consumed downstream; confirm against the posterior construction.
    """

    def __init__(self,
                 in_channels,
                 num_hiddens,
                 num_residual_layers,
                 num_residual_hiddens,
                 z_channels):
        super().__init__()
        half_hiddens = num_hiddens // 2
        # Stage 1: 4x4/stride-2 conv, in_channels -> num_hiddens // 2.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, half_hiddens,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(),
        )
        # Stage 2: 4x4/stride-2 conv, num_hiddens // 2 -> num_hiddens.
        # No trailing ReLU: the residual stack applies its own pre-activation.
        self.conv2 = nn.Sequential(
            nn.Conv2d(half_hiddens, num_hiddens,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_hiddens),
        )
        self.residual_stack = ResidualStack(
            in_channels=num_hiddens,
            num_hiddens=z_channels * 2,
            num_residual_layers=num_residual_layers,
            num_residual_hiddens=num_residual_hiddens,
        )

    def forward(self, inputs):
        downsampled = self.conv2(self.conv1(inputs))
        return self.residual_stack(downsampled)
        
    
class Decoder(nn.Module):
    """Residual stack followed by two stride-2 transposed-conv upsampling
    stages and a final Sigmoid, so outputs lie in (0, 1).

    Mirrors the Encoder's 4x downsampling with a 4x upsampling overall.
    """

    def __init__(self, out_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super().__init__()
        half_hiddens = num_hiddens // 2
        self._residual_stack = ResidualStack(
            in_channels=num_hiddens,
            num_hiddens=num_hiddens,
            num_residual_layers=num_residual_layers,
            num_residual_hiddens=num_residual_hiddens,
        )
        # First upsampling stage: num_hiddens -> num_hiddens // 2, 2x spatial.
        self._conv_trans2 = nn.Sequential(
            nn.ReLU(),
            nn.ConvTranspose2d(num_hiddens, half_hiddens,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(),
        )
        # Second upsampling stage plus a 3x3 head to out_channels and Sigmoid.
        self._conv_trans1 = nn.Sequential(
            nn.ConvTranspose2d(half_hiddens, half_hiddens,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(half_hiddens),
            nn.ReLU(),
            nn.Conv2d(half_hiddens, out_channels,
                      kernel_size=3, stride=1, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, inputs):
        h = self._residual_stack(inputs)
        h = self._conv_trans2(h)
        return self._conv_trans1(h)
    
class BasicVAELoss(nn.Module):
    """Plain VAE objective: per-batch-averaged, element-summed BCE
    reconstruction loss plus a KL term whose weight anneals linearly from 0
    to 1 over ``decay_step`` global steps.
    """

    def __init__(self, decay_step=5000):
        super().__init__()
        self.decay_step = decay_step
        # Current KL weight; recomputed from global_step on every forward.
        self.kl_weight = 1 / decay_step

    # NOTE(review): "reconstructoins" is misspelled but kept — renaming a
    # public parameter would break keyword callers.
    def forward(self, inputs, reconstructoins, posteriors, global_step, split='train'):
        bce = F.binary_cross_entropy(reconstructoins.contiguous(),
                                     inputs.contiguous(), reduction='none')
        rec_loss = bce.sum() / bce.shape[0]
        kl = posteriors.kl()
        kl_loss = kl.sum() / kl.shape[0]
        # Linear KL warm-up, capped at 1.
        self.kl_weight = min(global_step / self.decay_step, 1)
        loss = rec_loss + self.kl_weight * kl_loss
        log = {f'{split}/total_loss': loss.clone().detach().mean(),
               f'{split}/kl_loss': kl_loss.detach().mean(),
               f'{split}/rec_loss': rec_loss.detach().mean()}
        return loss, log
    
class BasicVae(BaseVAE):
    """Convolutional VAE with a diagonal-Gaussian latent.

    Pipeline: Encoder -> 1x1 ``quant_conv`` (moments, 2 * embed_dim channels)
    -> DiagonalGaussianDistribution -> sample/mode -> 1x1 ``post_quant_conv``
    -> Decoder. Trained with ``BasicVAELoss``. ``BaseVAE`` is presumably a
    LightningModule subclass (``self.log``/``global_step`` are used) — confirm.

    Fixes vs. original:
    - ``init_from_ckpt`` parameter was misspelled ``ignore_kyes`` while the
      ``__init__`` call passed keyword ``ignore_keys=`` — a guaranteed
      TypeError whenever ``ckpt_path`` was given.
    - ``str.startwith`` typo corrected to ``startswith``.
    - ``validation_step`` returned the bound method ``self.log_dict`` instead
      of the metrics dict.
    - Mutable default argument ``ignore_keys=[]`` replaced with ``None``.
    """

    def __init__(self,
                 encoderconfig,
                 decoderconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=None,
                 image_key='maps',
                 monitor=None):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**encoderconfig)
        self.decoder = Decoder(**decoderconfig)
        self.loss = BasicVAELoss()
        # 1x1 convs mapping encoder moment channels <-> embedding space.
        self.quant_conv = nn.Conv2d(2 * encoderconfig['z_channels'], 2 * embed_dim, 1)
        self.post_quant_conv = nn.Conv2d(embed_dim, encoderconfig['z_channels'], 1)
        self.embed_dim = embed_dim
        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])

    def init_from_ckpt(self, path, ignore_keys=None):
        """Load weights from a Lightning checkpoint at ``path``, dropping any
        state-dict key that starts with a prefix in ``ignore_keys``."""
        ignore_keys = ignore_keys or []
        sd = torch.load(path, map_location='cpu')['state_dict']
        for k in list(sd.keys()):
            if any(k.startswith(ik) for ik in ignore_keys):
                print(f"Deleting key {k} from state_dict")
                del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def encode(self, input):
        """Encode ``input`` into a diagonal-Gaussian posterior over latents."""
        h = self.encoder(input)
        moments = self.quant_conv(h)
        return DiagonalGaussianDistribution(moments)

    def decode(self, z):
        """Decode a latent ``z`` back to image space."""
        z = self.post_quant_conv(z)
        return self.decoder(z)

    def forward(self, x, sample_posterior=True):
        """Return (reconstruction, posterior); samples the posterior unless
        ``sample_posterior`` is False, in which case its mode is used."""
        posterior = self.encode(x)
        z = posterior.sample() if sample_posterior else posterior.mode()
        return self.decode(z), posterior

    def get_input(self, batch, k):
        """Fetch the tensor under key ``k`` from ``batch``.

        NOTE(review): a 3-D input gets a trailing unit dim appended
        (``x[..., None]``), but Conv2d expects channels at dim 1 — confirm
        callers always supply NCHW batches.
        """
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        return x

    def configure_optimizers(self):
        # ``self.learning_rate`` is expected to be set externally
        # (e.g. by the Lightning trainer / config) before this is called.
        lr = self.learning_rate
        params = (list(self.encoder.parameters()) +
                  list(self.decoder.parameters()) +
                  list(self.quant_conv.parameters()) +
                  list(self.post_quant_conv.parameters()))
        opt = torch.optim.Adam(params, lr=lr)
        return [opt], []

    def training_step(self, batch, batch_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, self.global_step, split='train')
        self.log('aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        self.log('kl_weight', self.loss.kl_weight, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        self.log_dict(log_dict_ae, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        return aeloss

    def validation_step(self, batch, batch_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, self.global_step, split='val')
        self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
        self.log_dict(log_dict_ae)
        # Fixed: the original returned the bound ``self.log_dict`` method.
        return log_dict_ae

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, **kwargs):
        """Build a dict of input / reconstruction / prior-sample images for
        logging; gradient-free."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            x_rec, posterior = self(x)
            # Decode noise of the posterior-sample shape as prior samples.
            log['samples'] = self.decode(torch.randn_like(posterior.sample()))
            log['reconstructions'] = x_rec
        log['inputs'] = x
        return log