import torch
import torch.nn as nn
import torch.nn.functional as F

from .true.posEnc import PositionalEncoding


class Encoder(nn.Module):
    """Strided 1-D convolutional encoder (learned filterbank).

    Maps a raw waveform to a latent time-frequency-like representation
    with a Conv1d (stride = kernel_size // 2, i.e. 50% overlap) followed
    by a ReLU non-linearity.

    Arguments
    ---------
    kernel_size : int
        Length of the convolutional filters (window size).
    out_channels : int
        Number of learned filters (feature dimension F).
    in_channels : int
        Number of input channels (1 for mono audio).

    Input shape: B x in_channels x L
    Output shape: B x out_channels (F) x T, with T = (L - kernel_size) // (kernel_size // 2) + 1
    """

    def __init__(self, kernel_size=16, out_channels=256, in_channels=1):
        super().__init__()
        self.conv1d = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=kernel_size // 2,  # 50% frame overlap
            groups=1,
            bias=False,
        )

    def forward(self, x):
        """Encode a waveform.

        Arguments
        ---------
        x : torch.Tensor
            Input tensor of shape [B, in_channels, L].

        Returns
        -------
        torch.Tensor
            Non-negative features of shape [B, out_channels, T].
        """
        x = self.conv1d(x)
        # ReLU keeps the representation non-negative, as in Conv-TasNet/SepFormer.
        x = F.relu(x)
        return x


class Decoder(nn.Module):
    """A decoder layer that consists of ConvTranspose1d.

    Reconstructs a waveform from the encoded representation using a
    transposed convolution with stride = kernel_size // 2 (the inverse
    of the Encoder's 50%-overlap framing).

    Arguments
    ---------
    in_channels : int
        Number of input channels (must match the encoder's out_channels).
    out_channels : int
        Number of output channels (1 for mono audio).
    kernel_size : int
        Length of the filters.

    Example
    ---------
    >>> x = torch.randn(2, 100, 1000)
    >>> decoder = Decoder(kernel_size=4, in_channels=100, out_channels=1)
    >>> h = decoder(x)
    >>> h.shape
    torch.Size([2, 2002])
    """

    def __init__(self, in_channels=256, out_channels=1, kernel_size=16):
        super().__init__()
        self.decoder = nn.ConvTranspose1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=kernel_size // 2,
            groups=1,
            bias=False,
        )

    def forward(self, x):
        """Return the decoded output.

        Arguments
        ---------
        x : torch.Tensor
            Input tensor with dimensionality [B, N, L] (or 2-D, which is
            unsqueezed to add a channel dim).
                where, B = Batchsize,
                       N = number of filters
                       L = time points

        Returns
        -------
        torch.Tensor
            Decoded waveform with singleton dimensions squeezed out,
            typically [B, L_out].

        Raises
        ------
        RuntimeError
            If the input is not a 2-D or 3-D tensor.
        """
        if x.dim() not in [2, 3]:
            # type(self).__name__ — nn.Module instances have no __name__ attr.
            raise RuntimeError(
                "{} accepts 2/3D tensor as input".format(type(self).__name__)
            )
        x = self.decoder(x if x.dim() == 3 else torch.unsqueeze(x, 1))

        # Drop singleton dims; keep the batch dim when batch size is 1.
        if torch.squeeze(x).dim() == 1:
            x = torch.squeeze(x, dim=1)
        else:
            x = torch.squeeze(x)
        return x


class Transfomer(nn.Module):
    """Transformer encoder stack applied along one axis of a dual-path tensor.

    Operates on chunked features of shape [B, F, chunk_size, S]:
    - "intra": attention within each chunk (sequence axis = chunk_size),
    - otherwise ("inter"): attention across chunks (sequence axis = S).
    A residual connection adds the input back to the transformer output.

    NOTE(review): the class name keeps the original spelling "Transfomer"
    because callers elsewhere reference it.

    Arguments
    ---------
    in_type : str
        "intra" for within-chunk attention, anything else for across-chunk.
    num_features : int
        Model dimension (d_model).
    num_heads : int
        Number of attention heads.
    d_ff : int
        Feed-forward hidden dimension.
    num_layers : int
        Number of stacked encoder layers.
    batch_first : bool
        Passed to the encoder layer and positional encoding.
    norm_first : bool
        If True, use pre-norm encoder layers.
    """

    def __init__(
        self,
        in_type,
        num_features=256,
        num_heads=8,
        d_ff=1024,
        num_layers=8,
        batch_first=True,
        norm_first=True,
    ):
        super().__init__()
        self.intra = in_type == "intra"

        encoder_layer = nn.TransformerEncoderLayer(
            num_features,
            num_heads,
            d_ff,
            batch_first=batch_first,
            norm_first=norm_first,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        self.positional_encoding = PositionalEncoding(
            num_features, batch_first=batch_first
        )

    def forward(self, input):
        """Apply intra- or inter-chunk attention with a residual connection.

        Arguments
        ---------
        input : torch.Tensor
            Tensor of shape [B, num_features, chunk_size, S]
            (S = number of chunks).

        Returns
        -------
        torch.Tensor
            Same shape as ``input``.
        """
        batch_size, num_features, chunk_size, S = input.size()
        if self.intra:
            # Attend WITHIN each chunk: fold (B, S) into the batch dim so the
            # transformer's sequence axis is chunk_size.
            x = input.permute(0, 3, 2, 1).contiguous()  # [B, S, chunk_size, F]
            x = x.view(batch_size * S, chunk_size, num_features)
            x = self.positional_encoding(x)
            x = self.transformer(x)  # [B * S, chunk_size, F]
            x = x.view(batch_size, S, chunk_size, num_features)
            x = x.permute(0, 3, 2, 1).contiguous()  # [B, F, chunk_size, S]
        else:
            # Attend ACROSS chunks: fold (B, chunk_size) into the batch dim so
            # the transformer's sequence axis is S.
            x = input.permute(0, 2, 3, 1).contiguous()  # [B, chunk_size, S, F]
            x = x.view(batch_size * chunk_size, S, num_features)
            x = self.positional_encoding(x)
            x = self.transformer(x)  # [B * chunk_size, S, F]
            x = x.view(batch_size, chunk_size, S, num_features)
            x = x.permute(0, 3, 1, 2).contiguous()  # [B, F, chunk_size, S]
        # Residual connection around the attention block.
        return x + input


class SepFormer(nn.Module):
    """Dual-path separation block: alternating intra- and inter-chunk
    Transfomer stacks (intra → inter → intra → inter).

    Arguments
    ---------
    num_features : int
        Model dimension (d_model) of every sub-block.
    num_heads : int
        Number of attention heads.
    d_ff : int
        Feed-forward hidden dimension.
    num_layers : int
        Number of encoder layers per sub-block.
    batch_first : bool
        Passed through to each Transfomer.
    norm_first : bool
        If True, use pre-norm encoder layers.
    """

    def __init__(
        self,
        num_features=256,
        num_heads=8,
        d_ff=1024,
        num_layers=8,
        batch_first=True,
        norm_first=True,
    ):
        super().__init__()
        self.intra_first = Transfomer(
            "intra", num_features, num_heads, d_ff, num_layers, batch_first, norm_first
        )
        self.intra_second = Transfomer(
            "intra", num_features, num_heads, d_ff, num_layers, batch_first, norm_first
        )
        self.inter_first = Transfomer(
            "inter", num_features, num_heads, d_ff, num_layers, batch_first, norm_first
        )
        self.inter_second = Transfomer(
            "inter", num_features, num_heads, d_ff, num_layers, batch_first, norm_first
        )

    def forward(self, x):
        """Run the four sub-blocks in sequence.

        Arguments
        ---------
        x : torch.Tensor
            Tensor of shape [B, num_features, chunk_size, S].

        Returns
        -------
        torch.Tensor
            Same shape as ``x``.
        """
        for block in (
            self.intra_first,
            self.inter_first,
            self.intra_second,
            self.inter_second,
        ):
            x = block(x)
        return x


if __name__ == "__main__":
    # Smoke test: use the GPU when one is available, otherwise fall back to
    # CPU so the script does not crash on CPU-only machines.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Dummy dual-path input: [B, num_features, chunk_size, num_chunks (S)].
    x = torch.randn(1, 256, 250, 34).to(device)
    # transformer = Transfomer(in_type="intra")
    # out = transformer(x)  # [170,250,256]
    # transformer = Transfomer(in_type="inter")
    # out = transformer(x)  # [170,250,256]
    # sepformer = SepFormer().to(device)
    # out = sepformer(x)
    # print(out.shape)
