import torch
import torch.nn as nn
import torch.nn.functional as F

from math import ceil


class IntraFrame(nn.Module):
    """Intra-frame (frequency-axis) modelling block.

    Unfolds the frequency axis into overlapping patches of ``i_stride``
    bins (hop ``j_stride``), runs a bidirectional LSTM over the patch
    sequence for every time frame independently, folds the result back
    with a transposed convolution, and adds a residual connection.

    Input/output shape: [B, emb_channel, T, F].
    """

    def __init__(
        self, i_stride=8, j_stride=1, emb_channel=32, hidden_channels=256
    ) -> None:
        super().__init__()
        self.i_stride = i_stride
        self.j_stride = j_stride
        self.hidden_channels = hidden_channels
        in_channels = emb_channel * i_stride

        # GroupNorm with a single group normalises across all C*I channels.
        self.layerNorm = nn.GroupNorm(1, in_channels)
        # bi-LSTM over the sequence of unfolded frequency patches
        self.bi_lstm = nn.LSTM(
            in_channels, hidden_channels, 1, batch_first=True, bidirectional=True
        )
        # Folds the patch sequence back to the (padded) frequency axis;
        # kernel/stride mirror the unfold geometry exactly.
        self.deconv = nn.ConvTranspose1d(
            hidden_channels * 2, emb_channel, i_stride, j_stride
        )

    def forward(self, input):
        """
        Args:
            input: tensor of shape [B, emb (C or D), T, F (Q)]

        Returns:
            Tensor of the same shape as ``input`` (residual output).
        """
        B, C, T, old_Q = input.shape
        # Pad F up to the next size compatible with the unfold kernel/hop.
        Q = (
            ceil((old_Q - self.i_stride) / self.j_stride) * self.j_stride
            + self.i_stride
        )
        # [B, emb, T, Q]
        x = F.pad(input, (0, Q - old_Q))
        # [B, T, emb, Q]
        x = x.permute(0, 2, 1, 3).contiguous()
        # [B*T, C, Q]
        x = x.view(B * T, C, Q)
        # [B*T, C*I, P] with P = (Q - I) / J + 1 overlapping patches
        x = F.unfold(x[..., None], (self.i_stride, 1), stride=(self.j_stride, 1))
        num_patches = (Q - self.i_stride) // self.j_stride + 1
        x = x.view(B, T, C * self.i_stride, num_patches)
        # [B, C*I, T, P] -- normalise over the C*I channel dimension
        x = self.layerNorm(x.permute(0, 2, 1, 3))
        # [B*T, P, C*I]
        x = x.permute(0, 2, 3, 1).contiguous().view(B * T, -1, self.i_stride * C)
        # [B*T, P, 2H]
        x, _ = self.bi_lstm(x)
        # [B*T, 2H, P]
        x = x.permute(0, 2, 1).contiguous()
        # [B*T, C, Q] -- deconv output width is exactly Q since the
        # padding made (Q - I) divisible by J
        x = self.deconv(x)
        # [B, C, T, Q]
        x = x.view(B, T, C, Q).transpose(1, 2).contiguous()

        # BUGFIX: crop the padded frequency bins before the residual add;
        # previously the add failed whenever Q != old_Q (i.e. padding occurred).
        x = x[..., :old_Q] + input

        return x


class InterFrame(nn.Module):
    """Inter-frame (time-axis) modelling block.

    Mirror of the intra-frame block along the time axis: unfolds time
    into overlapping patches of ``i_stride`` frames (hop ``j_stride``),
    runs a bidirectional LSTM over the patch sequence for every
    frequency bin independently, folds back with a transposed
    convolution, and adds a residual connection.

    Input/output shape: [B, emb_channel, T, F].
    """

    def __init__(
        self, i_stride=8, j_stride=1, emb_channel=32, hidden_channels=256
    ) -> None:
        super().__init__()
        self.i_stride = i_stride
        self.j_stride = j_stride
        self.hidden_channels = hidden_channels
        in_channels = emb_channel * i_stride

        # GroupNorm with a single group normalises across all C*I channels.
        self.layerNorm = nn.GroupNorm(1, in_channels)
        # bi-LSTM over the sequence of unfolded time patches
        self.bi_lstm = nn.LSTM(
            in_channels, hidden_channels, 1, batch_first=True, bidirectional=True
        )
        # Folds the patch sequence back to the (padded) time axis.
        self.deconv = nn.ConvTranspose1d(
            hidden_channels * 2, emb_channel, i_stride, j_stride
        )

    def forward(self, input):
        """
        Args:
            input: tensor of shape [B, emb (C or D), T, F (Q)]

        Returns:
            Tensor of the same shape as ``input`` (residual output).
        """
        B, C, old_T, Q = input.shape
        # Pad T up to the next size compatible with the unfold kernel/hop.
        T = (
            ceil((old_T - self.i_stride) / self.j_stride) * self.j_stride
            + self.i_stride
        )
        # [B, emb, T', Q]
        x = F.pad(input, (0, 0, 0, T - old_T))
        # [B, Q, C, T']
        x = x.permute(0, 3, 1, 2).contiguous()
        # [B*Q, C, T']
        x = x.view(B * Q, C, T)
        # [B*Q, C*I, P] with P = (T' - I) / J + 1 overlapping patches
        x = F.unfold(x[..., None], (self.i_stride, 1), stride=(self.j_stride, 1))
        num_patches = (T - self.i_stride) // self.j_stride + 1
        x = x.view(B, Q, C * self.i_stride, num_patches)
        # [B, C*I, Q, P] -- normalise over the C*I channel dimension
        x = self.layerNorm(x.permute(0, 2, 1, 3))
        # [B*Q, P, C*I]
        x = x.permute(0, 2, 3, 1).contiguous().view(B * Q, -1, self.i_stride * C)
        # [B*Q, P, 2H]
        x, _ = self.bi_lstm(x)
        # [B*Q, 2H, P]
        x = x.permute(0, 2, 1).contiguous()
        # [B*Q, C, T'] -- deconv output length is exactly T' since the
        # padding made (T' - I) divisible by J
        x = self.deconv(x)
        # [B, C, T', Q]
        x = x.view(B, Q, C, T).permute(0, 2, 3, 1).contiguous()

        # BUGFIX: crop the padded time frames before the residual add;
        # previously the add failed whenever T != old_T (i.e. padding occurred).
        x = x[:, :, :old_T, :] + input

        return x


class MultiHeadAttention(nn.Module):
    """Multi-head time-axis attention.

    Runs ``head`` single-head :class:`Attention` modules in parallel,
    merges their outputs with a 1x1 conv + PReLU + LayerNorm, and adds
    a residual connection.

    Input/output shape: [B, emb_dim, T, fre_dim].
    """

    def __init__(self, head=4, emb_dim=32, fre_dim=129) -> None:
        super().__init__()
        # BUGFIX: forward fre_dim to every head; previously the heads
        # always used their default fre_dim=129, so constructing this
        # module with any other fre_dim crashed inside the heads'
        # LayerNorms. Defaults are unchanged, so existing callers are
        # unaffected.
        self.attentions = nn.ModuleList(
            [
                Attention(L=head, emb_dim=emb_dim, fre_dim=fre_dim)
                for _ in range(head)
            ]
        )
        self.conv2d = nn.Conv2d(emb_dim, emb_dim, 1)
        self.norm = nn.PReLU()
        self.norm_cf = nn.LayerNorm((emb_dim, fre_dim))

    def forward(self, input):
        """
        Args:
            input: tensor of shape [B, emb (C or D), T, F]

        Returns:
            Tensor of the same shape as ``input`` (residual output).
        """
        B, D, T, F = input.shape
        # Each head yields [B, T, (D // head) * F]; concatenating over the
        # last dim gives [B, T, D * F].
        output = torch.cat([attention(input) for attention in self.attentions], -1)
        # NOTE(review): this reshape reinterprets the [B, T, D*F] buffer as
        # [B, D, T, F] without a permute, so the T and D axes get mixed
        # (a view(B, T, D, F).permute(0, 2, 1, 3) would keep them aligned).
        # Kept byte-identical to preserve trained-model behavior -- confirm
        # the original intent before changing.
        output = output.reshape(B, D, T, F)
        # [B, D, T, F]
        output = self.norm(self.conv2d(output))
        # [B, T, D, F] -> LayerNorm over (D, F) -> back to [B, D, T, F]
        output = output.permute(0, 2, 1, 3)
        output = self.norm_cf(output).permute(0, 2, 1, 3)
        return output + input


class Attention(nn.Module):
    """Single-head attention over the time axis.

    Queries and keys are projected to ``E`` channels and values to
    ``emb_dim // L`` channels via 1x1 convs + PReLU; each projection is
    LayerNorm-ed over its (channel, frequency) plane and flattened per
    time step, then standard scaled dot-product attention runs across
    time.
    """

    def __init__(self, E=4, emb_dim=32, fre_dim=129, L=4) -> None:
        super().__init__()
        self.E = E
        self.fre_dim = fre_dim
        self.D = emb_dim
        self.L = L
        self.q_before = nn.Sequential(nn.Conv2d(emb_dim, E, 1), nn.PReLU())
        self.q_norm_cf = nn.LayerNorm((E, fre_dim))
        self.k_before = nn.Sequential(nn.Conv2d(emb_dim, E, 1), nn.PReLU())
        self.k_norm_cf = nn.LayerNorm((E, fre_dim))
        self.v_before = nn.Sequential(nn.Conv2d(emb_dim, emb_dim // L, 1), nn.PReLU())
        self.v_norm_cf = nn.LayerNorm((emb_dim // L, fre_dim))

    def forward(self, input):
        """
        Args:
            input: tensor of shape [B, emb (D), T, F]

        Returns:
            Attention output of shape [B, T, (D // L) * F].
        """
        batch, _, frames, freq = input.shape

        def _project(pre, norm, width):
            # conv+PReLU -> [B, c, T, F]; swap to [B, T, c, F]; LayerNorm
            # over the (c, F) plane; flatten that plane per time step.
            y = norm(pre(input).transpose(1, 2).contiguous())
            return y.view(batch, frames, width * freq)

        q = _project(self.q_before, self.q_norm_cf, self.E)
        k = _project(self.k_before, self.k_norm_cf, self.E)
        v = _project(self.v_before, self.v_norm_cf, self.D // self.L)

        # Scaled dot-product attention across the time dimension.
        scores = q @ k.transpose(1, 2) / ((freq * self.E) ** 0.5)
        weights = F.softmax(scores, dim=2)
        return weights @ v


if __name__ == "__main__":
    # Smoke test: the multi-head attention block must preserve input shape.
    dummy = torch.randn(3, 32, 501, 129)
    model = MultiHeadAttention()
    out = model(dummy)
    print(out.shape)
