import torch.nn as nn
import torch
from .net import SepFormer


class MaskNet(nn.Module):
    """Dual-path mask estimation network (SepFormer-style).

    Takes a mixture embedding of shape [B, N, L], splits it into
    half-overlapping chunks, runs the SepFormer dual-path blocks over the
    chunks, merges them back with overlap-and-add, and emits one mask per
    speaker. Output shape: [num_spks, B, N, L].
    """

    def __init__(self, in_channels=256, chunk_size=250, num_spks=2, out_channels=256):
        """
        Arguments
        ---------
        in_channels : int
            Number of channels of the input embedding (N).
        chunk_size : int
            Length K of each chunk in the segmentation stage.
        num_spks : int
            Number of speakers to separate.
        out_channels : int
            Channel width used inside the mask head.
        """
        super().__init__()
        # TODO: only global layer norm (GroupNorm with 1 group) is supported.
        self.norm = nn.GroupNorm(1, in_channels)
        # 1x1 bottleneck convolution applied before chunking.
        self.linear = nn.Conv1d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=1,
            bias=False,
        )
        self.chunk_size = chunk_size
        self.sepformer = SepFormer()
        # NOTE: the original assigned self.prelu twice; one registration
        # suffices (the duplicate was silently overwriting the first).
        self.prelu = nn.PReLU()
        # Expand channels so each speaker gets its own feature map.
        self.linear2 = nn.Conv2d(out_channels, out_channels * num_spks, kernel_size=1)
        self.num_spks = num_spks
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1, bias=False)
        self.activation = nn.ReLU()
        # Gated output layer (tanh branch * sigmoid gate), as in
        # Conv-TasNet / DPRNN mask heads.
        self.output = nn.Sequential(nn.Conv1d(out_channels, out_channels, 1), nn.Tanh())
        self.output_gate = nn.Sequential(
            nn.Conv1d(out_channels, out_channels, 1), nn.Sigmoid()
        )

    def _padding(self, input, K):
        """Zero-pad a [B, N, L] tensor for 50%-overlap segmentation.

        K is the chunk length and P = K // 2 the hop size. Appends `gap`
        zeros so the length is compatible with the overlapping split, then
        pads P zeros on both ends.

        Returns
        -------
        (padded, gap) : the padded tensor and the number of zeros appended
        at the end (needed later to undo the padding in `_over_add`).
        """
        B, N, L = input.shape
        P = K // 2
        gap = K - (P + L % K) % K
        if gap > 0:
            # new_zeros keeps dtype and device of `input` in one allocation
            # (replaces the deprecated torch.Tensor(...).type(...) pattern).
            pad = input.new_zeros(B, N, gap)
            input = torch.cat([input, pad], dim=2)

        _pad = input.new_zeros(B, N, P)
        input = torch.cat([_pad, input, _pad], dim=2)

        return input, gap

    def _Segmentation(self, input, K):
        """Split a [B, N, L] tensor into half-overlapping chunks.

        K is the chunk length, P = K // 2 the hop size. Two shifted views
        of the padded signal are interleaved so consecutive chunks overlap
        by P samples.

        Returns
        -------
        (chunks, gap) : chunks with shape [B, N, K, S] and the padding gap.
        """
        B, N, L = input.shape
        P = K // 2
        input, gap = self._padding(input, K)
        # Interleave the un-shifted and P-shifted chunkings -> hop size P.
        input1 = input[:, :, :-P].contiguous().view(B, N, -1, K)
        input2 = input[:, :, P:].contiguous().view(B, N, -1, K)
        input = torch.cat([input1, input2], dim=3).view(B, N, -1, K).transpose(2, 3)
        return input.contiguous(), gap

    def _over_add(self, input, gap):
        """Merge the sequence with the overlap-and-add method.

        Arguments
        ---------
        input : torch.tensor
            Tensor with dim [B, N, K, S].
        gap : int
            Padding length.

        Return
        -------
        output : torch.tensor
            Tensor with dim [B, N, L].
            where, B = Batchsize,
               N = number of filters
               K = time points in each chunk
               S = the number of chunks
               L = the number of time points

        """
        B, N, K, S = input.shape
        P = K // 2
        # Pair up the interleaved chunk streams: [B, N, S/2, 2K].
        input = input.transpose(2, 3).contiguous().view(B, N, -1, K * 2)

        # First stream drops the leading P pad, second drops the trailing P.
        input1 = input[:, :, :, :K].contiguous().view(B, N, -1)[:, :, P:]
        input2 = input[:, :, :, K:].contiguous().view(B, N, -1)[:, :, :-P]
        input = input1 + input2
        # Remove the `gap` zeros appended by _padding -> [B, N, L].
        if gap > 0:
            input = input[:, :, :-gap]

        return input

    def forward(self, x):
        """Estimate per-speaker masks.

        Arguments
        ---------
        x : torch.tensor
            Mixture embedding with dim [B, N, L].

        Return
        -------
        torch.tensor with dim [num_spks, B, N, L].
        """
        x = self.norm(x)
        x = self.linear(x)
        # [B, N, K (chunk length), S (number of chunks)]
        x, gap = self._Segmentation(x, self.chunk_size)

        # Dual-path processing over chunks.
        x = self.sepformer(x)
        x = self.prelu(x)
        # Expand to num_spks feature maps per speaker.
        x = self.linear2(x)

        B, _, K, S = x.shape
        x = x.view(B * self.num_spks, -1, K, S)
        # [B*spks, N, L]
        x = self._over_add(x, gap)
        # Gated output layer (tanh * sigmoid) before the final projection.
        x = self.output(x) * self.output_gate(x)
        # [B*spks, N, L]
        x = self.end_conv1x1(x)
        _, N, L = x.shape
        x = x.view(B, self.num_spks, N, L)
        # [B, spks, N, L]
        x = self.activation(x)

        # [spks, B, N, L]
        x = x.transpose(0, 1)

        return x


if __name__ == "__main__":
    device = "cuda"
    masknet = MaskNet().to(device)
    x = torch.randn(1, 256, 3999).to(device)
    y = masknet(x)
    print(y.shape)
