import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

from torch.autograd import grad
from einops import rearrange, pack, unpack, repeat
from einops.layers.torch import Rearrange
from vector_quantize_pytorch import LFQ, VectorQuantize

from sgm.modules.attention import Transformer
from sgm.modules.position_bias import ContinuousPositionBias
from sgm.modules.vq.discriminator import Discriminator
from sgm.modules.vq.loss import HingeGenLoss, BCEGenLoss, HingeDiscLoss, BCEDiscLoss
from sgm.modules.vq.quantizers import VectorQuantizer, GumbelQuantizer
from sgm.utils import pair


def pick_video_frame(video, pick_frame_indices):
    """Select one frame per batch element from a video tensor.

    Args:
        video: tensor of shape (b, c, f, ...) — batch, channels, frames, spatial dims.
        pick_frame_indices: long tensor of shape (b, 1), one frame index per sample.

    Returns:
        Tensor of shape (b, c, ...) holding the picked frame for each sample.
    """
    batch_size = video.shape[0]
    # move the frame axis in front of channels: (b, f, c, ...)
    frames_first = video.movedim(2, 1)
    batch_arange = torch.arange(batch_size, device=video.device).unsqueeze(-1)
    # advanced indexing yields (b, 1, c, ...); drop the singleton frame axis
    picked = frames_first[batch_arange, pick_frame_indices]
    return picked.squeeze(1)


def get_layer_grad(loss, layer):
    """Return d(loss)/d(layer) as a detached tensor.

    The graph is retained so further backward passes (or repeated calls)
    against the same loss remain possible.
    """
    (layer_grad,) = grad(
        outputs=loss,
        inputs=layer,
        grad_outputs=torch.ones_like(loss),
        retain_graph=True,
    )
    return layer_grad.detach()


def safe_div(divisor, dividend, eps=1e-8):
    """Divide the first argument by the second, with `eps` added to the denominator
    to avoid division by zero.

    NOTE(review): the parameter names are semantically swapped — `divisor` is
    actually the numerator here. Kept as-is so keyword callers don't break.
    """
    denominator = dividend + eps
    return divisor / denominator


def gradient_penalty(video, output, weight=10):
    """WGAN-GP style gradient penalty.

    Pushes the per-sample L2 norm of d(output)/d(video) toward 1; the squared
    deviation is averaged over the batch and scaled by `weight`.
    """
    (gradients,) = grad(
        outputs=output,
        inputs=video,
        grad_outputs=torch.ones(output.size(), device=video.device),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )

    # flatten everything but the batch axis before taking the per-sample norm
    gradients = gradients.reshape(gradients.shape[0], -1)
    penalty = (gradients.norm(2, dim=-1) - 1).square().mean()
    return weight * penalty


class VQGAN(nn.Module):
    """Transformer-based video VQ-GAN.

    The first frame is patched spatially on its own; the remaining frames are
    patched spatially and temporally (``temporal_patch_size`` frames per patch).
    Patch embeddings pass through factorized spatial + temporal transformer
    encoders, are quantized (lookup-free quantization or cosine-sim vector
    quantization), then decoded back to pixels by mirrored transformer
    decoders. Optionally a VGG perceptual loss and a per-frame discriminator
    provide the GAN objective.
    """

    def __init__(self, dim, codebook_size, image_size, image_patch_size, temporal_patch_size,
                 spatial_num_layers, temporal_num_layers, channels=3, num_heads=12, head_size=64, peg=True,
                 discriminator_hidden_size=16, discriminator_attn_res_layers=(16,), use_vgg_and_gan=False,
                 attn_dropout=0., ffn_dropout=0., use_hinge_loss=False, gradient_checkpointing=True,
                 lookup_free_quantization=True, lookup_free_quantization_kwargs=None):
        """
        notations:
            b - batch
            c - channels
            t - frames
            d - feature size
            ph, pw, pt - image patch and then temporal patch size
        """
        super().__init__()

        if lookup_free_quantization_kwargs is None:
            lookup_free_quantization_kwargs = {}

        self.image_patch_size = image_patch_size
        # kept under the original misspelled attribute name for backward compatibility
        self.image_path_size = image_patch_size
        self.image_size = image_size
        self.image_height, self.image_width = pair(image_size)
        self.patch_height, self.patch_width = pair(image_patch_size)
        self.patch_temporal = temporal_patch_size

        # first frame is patched spatially only
        self.first_frame_to_patch_emb = nn.Sequential(
            Rearrange('b c 1 (h ph) (w pw) -> b 1 h w (c ph pw)', ph=self.patch_height, pw=self.patch_width),
            nn.Linear(channels * self.patch_width * self.patch_height, dim),
        )

        # remaining frames are patched spatially AND temporally
        self.video_to_patch_emb = nn.Sequential(
            Rearrange('b c (t pt) (h ph) (w pw) -> b t h w (c pt ph pw)',
                      ph=self.patch_height, pw=self.patch_width, pt=temporal_patch_size),
            nn.Linear(channels * self.patch_width * self.patch_height * temporal_patch_size, dim),
        )

        # spatial position bias, shared by the spatial encoder and decoder
        self.spatial_rel_pos_bias = ContinuousPositionBias(dim=dim, num_heads=num_heads)

        # Encoder transformer blocks
        self.spatial_encoder = Transformer(dim=dim, head_size=head_size, num_heads=num_heads,
                                           attn_dropout=attn_dropout, ff_dropout=ffn_dropout,
                                           peg=peg, peg_causal=True, num_layers=spatial_num_layers,
                                           gradient_checkpoint=gradient_checkpointing)
        self.temporal_encoder = Transformer(dim=dim, head_size=head_size, num_heads=num_heads,
                                            attn_dropout=attn_dropout, ff_dropout=ffn_dropout,
                                            peg=peg, peg_causal=True, num_layers=temporal_num_layers,
                                            gradient_checkpoint=gradient_checkpointing)

        # quantizer -- remember the flavor so decode_from_codebook_indices can branch on it
        # (the original never stored this flag, and that method crashed with AttributeError)
        self.lookup_free_quantization = lookup_free_quantization
        if lookup_free_quantization:
            # defaults first, so caller-supplied kwargs override them; the
            # original unconditionally clobbered the caller's kwargs here
            lfq_kwargs = {'entropy_loss_weight': 0, 'commitment_loss_weight': 1,
                          **lookup_free_quantization_kwargs}
            self.quantizer = LFQ(dim=dim, codebook_size=codebook_size, **lfq_kwargs)
        else:
            self.quantizer = VectorQuantize(dim=dim, codebook_size=codebook_size, use_cosine_sim=True)

        # weight applied to the quantizer's auxiliary (commit/entropy) loss
        self.code_weight = 1.0
        self.pre_quant = nn.Linear(dim, dim)
        self.post_quant = nn.Linear(dim, dim)

        # Decoder transformer blocks (mirror of the encoder)
        self.spatial_decoder = Transformer(dim=dim, head_size=head_size, num_heads=num_heads,
                                           attn_dropout=attn_dropout, ff_dropout=ffn_dropout,
                                           peg=peg, peg_causal=True, num_layers=spatial_num_layers,
                                           gradient_checkpoint=gradient_checkpointing)
        self.temporal_decoder = Transformer(dim=dim, head_size=head_size, num_heads=num_heads,
                                            attn_dropout=attn_dropout, ff_dropout=ffn_dropout,
                                            peg=peg, peg_causal=True, num_layers=temporal_num_layers,
                                            gradient_checkpoint=gradient_checkpointing)

        # recon modules (inverse of the patch embeddings)
        self.first_frame_to_pixels = nn.Sequential(
            nn.Linear(dim, channels * self.patch_height * self.patch_width),
            Rearrange('b 1 h w (c ph pw) -> b c 1 (h ph) (w pw)',
                      ph=self.patch_height, pw=self.patch_width)
        )
        self.rest_frame_to_pixels = nn.Sequential(
            nn.Linear(dim, channels * self.patch_height * self.patch_width * temporal_patch_size),
            Rearrange('b t h w (c ph pw pt) -> b c (t pt) (h ph) (w pw)',
                      ph=self.patch_height, pw=self.patch_width, pt=temporal_patch_size)
        )

        # perceptual loss backbone (final classifier layers dropped; the remaining
        # activations serve as perceptual features)
        self.use_vgg_and_gan = use_vgg_and_gan
        if self.use_vgg_and_gan:
            self.vgg = torchvision.models.vgg16(pretrained=True)
            self.vgg.classifier = nn.Sequential(*self.vgg.classifier[:-2])
        else:
            self.discriminator = None
            return

        # gan loss
        self.discriminator = Discriminator(image_size=image_size, dim=discriminator_hidden_size, channels=channels,
                                           attn_resolution_layers=discriminator_attn_res_layers)
        self.gen_loss = HingeGenLoss() if use_hinge_loss else BCEGenLoss()
        self.discr_loss = HingeDiscLoss() if use_hinge_loss else BCEDiscLoss()

    def decode_from_codebook_indices(self, indices):
        """Decode quantizer indices straight to a pixel video.

        Fixes the original body, which referenced the non-existent attribute
        ``self.vq`` (the quantizer is stored as ``self.quantizer``) and the
        never-stored ``self.lookup_free_quantization`` flag.
        """
        if self.lookup_free_quantization:
            codes = self.quantizer.indices_to_codes(indices)
        else:
            codes = self.quantizer.codebook[indices]

        return self.decode(codes)

    def encode(self, patched_embed: torch.Tensor):
        """Run factorized spatial-then-temporal attention over patch embeddings.

        Args:
            patched_embed: (b, t, h, w, d) patch embeddings.

        Returns:
            Tensor of the same (b, t, h, w, d) shape.
        """
        b = patched_embed.shape[0]
        h, w = self.image_height // self.patch_height, self.image_width // self.patch_width
        video_shape = tuple(patched_embed.shape[:-1])

        # spatial: each frame attends within itself
        patched_embed = rearrange(patched_embed, 'b t h w d -> (b t) (h w) d')
        position_embedding = self.spatial_rel_pos_bias(h, w, device=patched_embed.device)
        hidden = self.spatial_encoder(patched_embed, attn_bias=position_embedding, video_shape=video_shape)
        hidden = rearrange(hidden, '(b t) (h w) d -> b t h w d', b=b, h=h, w=w)

        # temporal: each spatial location attends across time
        hidden = rearrange(hidden, 'b t h w d -> (b h w) t d')
        hidden = self.temporal_encoder(hidden, video_shape=video_shape)
        hidden = rearrange(hidden, '(b h w) t d -> b t h w d', b=b, h=h, w=w)
        return hidden

    def decode(self, tokens):
        """Decode latent tokens back to a pixel video.

        Args:
            tokens: (b, t, h, w, d) tokens, or flattened (b, t*h*w, d).

        Returns:
            Reconstructed video of shape (b, c, frames, H, W).
        """
        b = tokens.shape[0]
        h, w = self.image_height // self.patch_height, self.image_width // self.patch_width

        if tokens.ndim == 3:
            tokens = rearrange(tokens, 'b (t h w) d -> b t h w d', h=h, w=w)
        video_shape = tuple(tokens.shape[:-1])

        # temporal attention first (mirror of the encoder order)
        hidden = rearrange(tokens, 'b t h w d -> (b h w) t d')
        hidden = self.temporal_decoder(hidden, video_shape=video_shape)
        hidden = rearrange(hidden, '(b h w) t d -> b t h w d', b=b, h=h, w=w)

        # spatial attention
        hidden = rearrange(hidden, 'b t h w d -> (b t) (h w) d')
        attn_bias = self.spatial_rel_pos_bias(h, w, device=hidden.device)
        hidden = self.spatial_decoder(hidden, attn_bias=attn_bias, video_shape=video_shape)
        hidden = rearrange(hidden, '(b t) (h w) d -> b t h w d', b=b, h=h, w=w)

        # to pixels — project the DECODED hidden states; the original projected
        # the pre-decoder `tokens`, silently discarding both decoder transformers
        first_frame_token, rest_frames_tokens = hidden[:, :1], hidden[:, 1:]
        first_frame_pixel = self.first_frame_to_pixels(first_frame_token)
        rest_frames_pixels = self.rest_frame_to_pixels(rest_frames_tokens)
        recon_video = torch.cat((first_frame_pixel, rest_frames_pixels), dim=2)
        return recon_video

    def forward(self, video, mask=None, only_return_codebook_ids=False, only_return_recon=False,
                return_discriminator_loss=False, apply_grad_penalty=True):
        """Encode, quantize and reconstruct `video`; compute training losses.

        Args:
            video: (b, c, t, h, w) video or (b, c, h, w) single image.
            mask: optional (b, t) boolean frame mask; masked-out frames are
                excluded from the reconstruction loss and frame picking.
            only_return_codebook_ids: return quantizer indices only.
            only_return_recon: return the reconstruction only.
            return_discriminator_loss: return the discriminator-side loss only.
            apply_grad_penalty: add a gradient penalty to the discriminator loss.

        Returns:
            (total_loss, loss_dict, recon_video) unless one of the early-return
            flags above is set.
        """
        assert video.ndim in {4, 5}
        is_image = video.ndim == 4

        if is_image:
            video = rearrange(video, 'b c h w -> b c 1 h w')

        b, c, f, *image_size = video.shape

        first_frame, rest_frames = video[:, :, :1, ...], video[:, :, 1:, ...]

        # b c (1+T) H W --> b (1+t) h w dim
        first_frame_token = self.first_frame_to_patch_emb(first_frame)
        rest_frames_tokens = self.video_to_patch_emb(rest_frames)

        patched_embed = torch.cat((first_frame_token, rest_frames_tokens), dim=1)
        *_, h, w, _ = patched_embed.shape

        # encode patch embedding into tokens
        embed_hidden = self.encode(patched_embed)

        # quantize hidden vector into codes
        embed_hidden, packed_shape = pack([embed_hidden], 'b * d')
        embed_hidden = self.pre_quant(embed_hidden)
        embed_hidden, indices, vq_loss = self.quantizer(embed_hidden)
        embed_hidden = self.post_quant(embed_hidden)

        if only_return_codebook_ids:
            indices, = unpack(indices, packed_shape, 'b *')
            return indices

        embed_hidden = rearrange(embed_hidden, 'b (t h w) d -> b t h w d', h=h, w=w)
        recon_video = self.decode(embed_hidden)
        if is_image:
            # recon is (b, c, 1, h, w); the original pattern 'b 1 c h w' put the
            # singleton frame axis in the channel position and errored for c != 1
            recon_video = rearrange(recon_video, 'b c 1 h w -> b c h w')
        else:
            recon_video = recon_video.clone()

        if only_return_recon:
            return recon_video

        if mask is not None:
            # average the reconstruction error over unmasked frames only
            recon_loss = F.mse_loss(recon_video, video, reduction='none')
            recon_loss = recon_loss[repeat(mask, 'b t -> b c t', c=c)]
            recon_loss = recon_loss.mean()
        else:
            # MSE + L1 reconstruction objective
            recon_loss = F.mse_loss(recon_video, video)
            abs_loss = (recon_video - video).abs().mean()
            recon_loss = recon_loss + abs_loss

        # pick a random frame per sample for the discriminator / perceptual loss;
        # logits live on the video's device so masking/indexing work on GPU
        # (the original allocated them on CPU)
        pick_frame_logits = torch.randn(b, f, device=video.device)
        if mask is not None:
            mask_value = -torch.finfo(pick_frame_logits.dtype).max
            pick_frame_logits = pick_frame_logits.masked_fill(~mask, mask_value)

        pick_frame_indices = pick_frame_logits.topk(1, dim=-1).indices

        # whether to return discriminator loss
        if return_discriminator_loss:
            assert self.discriminator is not None, 'Must initiate discriminator then to train it'

            real_video_frame = pick_video_frame(video, pick_frame_indices)
            # detach the fake frame so the discriminator loss never backprops
            # into the generator; the original detached recon_video only AFTER
            # the frame had been picked, which had no effect
            recon_video_frame = pick_video_frame(recon_video.detach(), pick_frame_indices)

            # the real frame needs grad for the gradient penalty below
            real_video_frame.requires_grad_()

            recon_video_discr_logits, real_video_discr_logits = \
                map(self.discriminator, (recon_video_frame, real_video_frame))

            loss = self.discr_loss(recon_video_discr_logits, real_video_discr_logits)

            if apply_grad_penalty:
                gp = gradient_penalty(real_video_frame, real_video_discr_logits)
                loss = loss + gp

            return loss

        if not self.use_vgg_and_gan:
            weighted_vq_loss = vq_loss * self.code_weight
            total_loss = recon_loss + weighted_vq_loss
            loss_dict = {
                'recon_loss': recon_loss,
                # key fixed from misspelled 'vq_los'
                'vq_loss': weighted_vq_loss,
            }
            return total_loss, loss_dict, recon_video

        # perceptual loss on the picked frame
        video_vgg_input = pick_video_frame(video, pick_frame_indices)
        recon_vgg_input = pick_video_frame(recon_video, pick_frame_indices)

        # handle grayscale for vgg (it expects 3 input channels)
        if video.shape[1] == 1:
            video_vgg_input, recon_vgg_input = map(lambda t: repeat(t, 'b 1 ... -> b c ...', c=3),
                                                   (video_vgg_input, recon_vgg_input))

        video_vgg_feats, recon_vgg_feats = \
            self.vgg(video_vgg_input), self.vgg(recon_vgg_input)

        perceptual_loss = F.mse_loss(recon_vgg_feats, video_vgg_feats)

        # gen loss: fool the discriminator with the reconstructed frame
        gen_loss = self.gen_loss(self.discriminator(recon_vgg_input))

        # adaptive weight for the gan loss, balancing gradient magnitudes at the
        # last pixel projection layer.
        # NOTE(review): computed but NOT applied to total_loss below, matching
        # the original behavior — confirm whether it should multiply gen_loss
        pixel_layer_weight = self.first_frame_to_pixels[0].weight

        norm_gen_grad = get_layer_grad(gen_loss, pixel_layer_weight).norm(p=2)
        norm_perceptual_grad = get_layer_grad(perceptual_loss, pixel_layer_weight).norm(p=2)
        adaptive_weight = safe_div(norm_perceptual_grad, norm_gen_grad)
        adaptive_weight.clamp_(max=1e4)

        # total_loss
        total_loss = recon_loss + perceptual_loss + vq_loss + gen_loss
        loss_dict = {
            'recon_loss': recon_loss,
            'perceptual_loss': perceptual_loss,
            'vq_loss': vq_loss,
            'gen_loss': gen_loss,
        }

        return total_loss, loss_dict, recon_video
