import math
from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from torch import einsum
from einops import rearrange, repeat
from torch.utils.checkpoint import checkpoint

from sgm.modules.cnn import CausalConv3d
from sgm.modules.position_bias import AlibiPositionalBias


def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val


def init_weights(m):
    """Initialize a module's parameters in place (intended for nn.Module.apply).

    - nn.Linear: Xavier-uniform weight (following the official JAX ViT), zero bias.
    - nn.LayerNorm: unit weight, zero bias.
    - nn.Conv2d / nn.ConvTranspose2d: Xavier-uniform on the kernel viewed as a
      2-D (out_channels, -1) matrix.
    All other module types are left untouched.
    """
    if isinstance(m, nn.Linear):
        # we use xavier_uniform following official JAX ViT:
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.LayerNorm):
        nn.init.constant_(m.bias, 0)
        nn.init.constant_(m.weight, 1.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # xavier_uniform_ expects a 2-D tensor; the .view on .data shares
        # storage, so the in-place init mutates the real conv weight.
        w = m.weight.data
        nn.init.xavier_uniform_(w.view([w.shape[0], -1]))


# bias-less layer norm

class LayerNorm(nn.Module):
    """Layer norm with a learnable scale (gamma) but a fixed zero shift.

    `beta` is registered as a buffer rather than a Parameter, so it is never
    trained and stays zero — hence "bias-less".
    """

    def __init__(self, dim):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(dim))
        self.register_buffer("beta", torch.zeros(dim))

    def forward(self, x):
        # normalize over the last dimension only
        normalized_shape = x.shape[-1:]
        return F.layer_norm(x, normalized_shape, weight=self.gamma, bias=self.beta)


class Attention(nn.Module):
    """Multi-head (self- or cross-) attention with pre-norm, optional causal
    masking, and an ALiBi relative position bias in the causal case.

    Args:
        dim: feature size of the query input (and of the output).
        context_size: feature size of the key/value input; defaults to `dim`
            (plain self-attention).
        head_size: per-head dimension.
        num_heads: number of attention heads.
        causal: if True, apply a causal mask and the ALiBi positional bias.
        normalize_context: if True, LayerNorm the context before projecting k/v.
        num_null_kv: number of "null" key/value slots the key-padding mask is
            padded for. NOTE(review): the learned null k/v parameters themselves
            are currently disabled; this count only affects the mask padding.
        dropout: dropout rate on the attention weights.
    """

    def __init__(self, dim, context_size=None, head_size=64, num_heads=16, causal=True,
                 normalize_context=True, num_null_kv=0, dropout=0.1):
        super().__init__()

        hidden_size = num_heads * head_size
        context_size = default(context_size, dim)

        # buffer (not Parameter) so the scale follows .to(device/dtype) untrained
        self.register_buffer('scale', torch.tensor(head_size ** -0.5))
        self.head_size = head_size

        self.causal = causal
        if causal:
            self.rel_pos_bias = AlibiPositionalBias(num_heads=num_heads)

        self.num_heads = num_heads
        self.norm = LayerNorm(dim)
        self.context_norm = LayerNorm(context_size) if normalize_context else nn.Identity()

        # fix: forward() pads `mask` by self.num_null_kv, but the attribute was
        # never stored, so any call with a mask raised AttributeError
        self.num_null_kv = num_null_kv
        self.attn_dropout = nn.Dropout(dropout)

        self.to_q = nn.Linear(dim, hidden_size, bias=False)
        self.to_kv = nn.Linear(context_size, hidden_size * 2, bias=False)

        self.out = nn.Linear(hidden_size, dim, bias=False)

    def forward(self, x, mask=None, context=None, attn_bias=None):
        """Attend over `context` (or over `x` itself when context is None).

        Args:
            x: query tensor of shape (b, n, dim).
            mask: optional boolean key-padding mask of shape (b, j);
                False positions are masked out.
            context: optional key/value source of shape (b, j, context_size).
            attn_bias: optional additive bias broadcastable to the
                (b, h, i, j) similarity logits — TODO confirm expected shape
                against callers.
        """
        if context is not None:
            context = self.context_norm(context)

        kv_inputs = default(context, x)
        x = self.norm(x)

        q = self.to_q(x)
        k, v = self.to_kv(kv_inputs).chunk(2, dim=-1)

        # (b, n, h*d) -> (b, h, n, d)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads), (q, k, v))

        sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        i, j = sim.shape[-2:]

        if attn_bias is not None:
            sim = sim + attn_bias

        if mask is not None:
            # pad for the (currently disabled) null k/v slots; a no-op when
            # num_null_kv == 0
            mask = F.pad(mask, (self.num_null_kv, 0), value=True)
            mask = rearrange(mask, 'b j -> b 1 1 j')
            sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)

        if self.causal:
            # fix: was `sim + sim + self.rel_pos_bias(sim)` — a discarded
            # no-op expression, so the ALiBi bias was never applied
            sim = sim + self.rel_pos_bias(sim)
            causal_mask = torch.ones((i, j), device=x.device, dtype=torch.bool).triu(j - i + 1)
            sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)

        attn = sim.softmax(dim=-1)
        attn = self.attn_dropout(attn)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.out(out)


class GEGLU(nn.Module):
    """GEGLU activation: value * GELU(gate).

    Splits the last dimension in half; the first half is the value branch and
    the second half is the gate branch, so (..., 2*d) maps to (..., d).
    """

    def forward(self, x):
        value, gate = torch.chunk(x, 2, dim=-1)
        return value * F.gelu(gate)


class FeedForward(nn.Module):
    """Pre-norm GEGLU feed-forward block.

    The inner width is scaled by 2/3 so that the gated two-branch projection
    keeps roughly the same parameter count as a plain `multiply * dim` MLP.
    """

    def __init__(self, dim, multiply=4, dropout=0.1):
        super().__init__()

        inner_dim = int(multiply * (2 / 3) * dim)
        layers = [
            nn.LayerNorm(dim),
            # project to 2 * inner_dim: one half value, one half gate
            nn.Linear(dim, inner_dim * 2, bias=False),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim, bias=False),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


class PEG(nn.Module):
    """Positional Encoding Generator: a 3-D convolution over the token grid,
    used as an implicit (conditional) positional encoding.

    Accepts either an already-gridded channels-last tensor `(b, ..., d)` or a
    flattened `(b, n, d)` tensor together with the grid `shape` needed to
    unflatten it.
    """

    def __init__(self, dim, causal=False):
        super().__init__()
        if causal:
            self.net = CausalConv3d(in_channels=dim, out_channels=dim, kernel_size=3)
        else:
            self.net = nn.Conv3d(in_channels=dim, out_channels=dim, kernel_size=3, padding=1)

    def forward(self, x, shape):
        flattened = x.ndim == 3
        # a flattened input cannot be re-gridded without an explicit shape
        assert not (flattened and shape is None)

        initial_shape = x.shape
        if flattened:
            # unflatten the token axis back onto the grid given by `shape`
            x = x.reshape(*shape, -1)

        # channels-last -> channels-first for the conv, then back
        x = self.net(x.movedim(-1, 1)).movedim(1, -1)

        if flattened:
            # collapse the grid dimensions back into a single token axis
            x = x.flatten(1, -2)

        return x.reshape(initial_shape)


class Transformer(nn.Module):
    """Stack of residual self-attention + GEGLU feed-forward layers, with a
    final LayerNorm and optional gradient checkpointing during training.

    NOTE(review): `context_size`, `peg`, `peg_causal`, `cross_attn` and
    `attn_num_null_kv` are currently unused — the PEG / cross-attention variant
    of this stack is disabled. They are kept for interface compatibility with
    existing callers.

    Args:
        dim: model feature size.
        num_layers: number of (attention, feed-forward) layer pairs.
        head_size / num_heads: attention geometry.
        causal: whether self-attention layers use causal masking.
        ff_mult: feed-forward expansion factor.
        attn_dropout / ff_dropout: dropout rates.
        gradient_checkpoint: if True, checkpoint activations while training.
    """

    def __init__(self, dim, num_layers, context_size=None, head_size=64, num_heads=12, peg=False, causal=False,
                 peg_causal=True, cross_attn=False, attn_num_null_kv=0, ff_mult=4, attn_dropout=0.1, ff_dropout=0.1,
                 gradient_checkpoint=False):
        super().__init__()
        self.layers = nn.ModuleList([])
        self.checkpointing = gradient_checkpoint

        for _ in range(num_layers):
            self.layers.append(
                nn.ModuleList([
                    Attention(dim=dim, head_size=head_size, num_heads=num_heads, causal=causal, dropout=attn_dropout),
                    FeedForward(dim=dim, multiply=ff_mult, dropout=ff_dropout)
                ])
            )

        self.norm = LayerNorm(dim)
        self.apply(init_weights)

    def forward(
            self,
            x,
            video_shape: Tuple[int, int, int, int] = None,
            context=None,
            attn_bias=None,
            self_attn_mask=None,
            cross_attn_context_mask=None
    ):
        """Run the layer stack on `x` of shape (b, n, dim).

        `video_shape`, `context` and `cross_attn_context_mask` are accepted for
        interface compatibility but ignored by the current self-attention-only
        layers.
        """
        if self.checkpointing and self.training:
            # NOTE(review): relies on torch.utils.checkpoint's default
            # use_reentrant behavior — consider passing use_reentrant explicitly
            # (newer torch versions warn on, and change, the default).
            return checkpoint(self._forward, x, video_shape, context, attn_bias, self_attn_mask,
                              cross_attn_context_mask)
        else:
            return self._forward(x, video_shape, context, attn_bias, self_attn_mask, cross_attn_context_mask)

    def _forward(
            self,
            x,
            video_shape: Tuple[int, int, int, int] = None,
            context=None,
            attn_bias=None,
            self_attn_mask=None,
            cross_attn_context_mask=None
    ):
        # residual self-attention then residual feed-forward per layer;
        # the stack is pre-norm, so a final LayerNorm closes it out
        for self_attn, ff in self.layers:
            x = self_attn(x, attn_bias=attn_bias, mask=self_attn_mask) + x
            x = ff(x) + x

        return self.norm(x)
