import torch
import torch.nn as nn
from torch.autograd import Variable


class Conv1DBlock(nn.Module):
    """One temporal-convolution block of the TCN separator.

    Structure: 1x1 conv -> PReLU -> norm -> depthwise dilated conv -> PReLU
    -> norm, followed by two 1x1 projections producing a residual output and
    a skip output (the skip path is always enabled).

    Args:
        input_channel: channels of the block input (and of both outputs).
        hidden_channel: channels used internally around the depthwise conv.
        kernel: depthwise convolution kernel size.
        padding: padding of the depthwise conv. Callers pass
            ``padding == dilation`` so that, with ``kernel == 3``, the time
            length is preserved.
        dilation: dilation of the depthwise conv.
        causal: the causal variant is not implemented yet.

    Raises:
        NotImplementedError: if ``causal`` is True.
    """

    def __init__(
        self, input_channel, hidden_channel, kernel, padding, dilation, causal
    ):
        super().__init__()
        self.causal = causal
        if self.causal:
            # TODO: causal variant needs left-only padding and cumulative
            # layer norm. The original code silently built a broken module
            # here (no norm layers were created), so fail loudly instead.
            raise NotImplementedError("causal Conv1DBlock is not implemented")

        # 1x1 bottleneck conv into the hidden width.
        self.conv1d = nn.Conv1d(input_channel, hidden_channel, 1)
        self.padding = padding

        # Depthwise convolution (groups == channels).
        self.dconv = nn.Conv1d(
            hidden_channel,
            hidden_channel,
            kernel,
            dilation=dilation,
            padding=padding,
            groups=hidden_channel,
        )

        # Residual path back to the input width.
        self.res_out = nn.Conv1d(hidden_channel, input_channel, 1)

        self.pReLU1 = nn.PReLU()
        self.pReLU2 = nn.PReLU()

        # Global layer norm (a single group). eps is passed by keyword; the
        # original passed 1e-8 positionally into GroupNorm's eps slot.
        self.norm1 = nn.GroupNorm(1, hidden_channel, eps=1e-8)
        self.norm2 = nn.GroupNorm(1, hidden_channel, eps=1e-8)

        # Skip path, always enabled.
        self.skip_out = nn.Conv1d(hidden_channel, input_channel, 1)

    def forward(self, input):
        """Return ``(residual, skip)``, each shaped like ``input`` (B, C, T)."""
        output = self.norm1(self.pReLU1(self.conv1d(input)))
        output = self.norm2(self.pReLU2(self.dconv(output)))
        residual = self.res_out(output)
        skip = self.skip_out(output)
        return residual, skip


class TCN(nn.Module):
    """Temporal Convolutional Network used as the mask estimator.

    Layer-norms the encoder output, bottlenecks it to ``BN_dim`` channels,
    runs it through ``stack * layer`` dilated ``Conv1DBlock``s (dilations
    2**0 .. 2**(layer-1) inside each stack), sums the blocks' skip
    connections, and projects the sum to ``output_dimension`` channels.

    Args:
        enc_dimension: channels of the encoder output fed to ``forward``.
        output_dimension: channels of the produced (mask) tensor.
        BN_dim: bottleneck channel count used by the blocks.
        hidden_dim: hidden channel count of each block's depthwise conv.
        stack: number of dilation stacks.
        layer: blocks per stack.
        causal: the causal variant is not implemented yet.

    Raises:
        NotImplementedError: if ``causal`` is True.
    """

    def __init__(
        self,
        enc_dimension,
        output_dimension,
        BN_dim,
        hidden_dim,
        stack=3,
        layer=8,
        causal=False,
    ):
        super().__init__()
        self.causal = causal
        if self.causal:
            # TODO: cumulative layer norm (cLN). The original code left
            # self.layerNorm undefined for causal=True, which crashed in
            # forward(); fail at construction time instead.
            raise NotImplementedError("causal TCN is not implemented")

        # Global layer norm over the encoder features.
        self.layerNorm = nn.GroupNorm(1, enc_dimension, eps=1e-8)

        # 1x1 conv == pointwise (per-frame) linear bottleneck to BN_dim.
        self.BN = nn.Conv1d(enc_dimension, BN_dim, 1)

        self.stacks = nn.ModuleList([])
        for _ in range(stack):
            for j in range(layer):
                self.stacks.append(
                    Conv1DBlock(
                        BN_dim,
                        hidden_dim,
                        kernel=3,
                        dilation=2**j,
                        padding=2**j,  # padding == dilation keeps T unchanged
                        causal=causal,
                    )
                )

        self.output = nn.Sequential(nn.PReLU(), nn.Conv1d(BN_dim, output_dimension, 1))

    def forward(self, input):
        """Map (B, enc_dimension, T) -> (B, output_dimension, T)."""
        input = self.layerNorm(input)
        output = self.BN(input)

        # The sum of all blocks' skip connections feeds the output layer;
        # each block's residual updates the running bottleneck signal.
        skip_connection = 0.0
        for block in self.stacks:
            residual, skip = block(output)
            output = output + residual
            skip_connection = skip_connection + skip

        return self.output(skip_connection)


class ConvTasNet(nn.Module):
    """Conv-TasNet: learned encoder -> TCN mask estimator -> learned decoder.

    Args:
        enc_dim: number of encoder basis filters (N).
        sr: sample rate in Hz.
        win: window length in milliseconds.
        num_spk: number of speakers to separate.
        debugger: when True, print intermediate tensor shapes in ``forward``.
    """

    def __init__(self, enc_dim, sr=16000, win=2, num_spk=2, debugger=False):
        super().__init__()
        self.enc_dim = enc_dim
        self.debugger = debugger
        self.num_spk = num_spk

        # Window length in samples, with a 50% hop.
        self.win = int(sr * win / 1000)
        self.stride = self.win // 2

        # Encoder: 1-D conv acting as a learned filterbank.
        self.encoder = nn.Conv1d(
            1, self.enc_dim, self.win, stride=self.stride, bias=False
        )

        # Separator. NOTE: the attribute keeps the original spelling
        # ("seperator") so existing checkpoints still load. The separator's
        # input width must match the encoder output; the original hard-coded
        # 512 here, which crashed whenever enc_dim != 512.
        self.feature_dimension = 128
        self.seperator = self.build_TCN(self.enc_dim)

        # Decoder: transposed conv back to the waveform domain.
        self.decoder = nn.ConvTranspose1d(
            self.enc_dim, 1, self.win, bias=False, stride=self.stride
        )

    def build_TCN(self, enc_dimension):
        """Build the TCN separator for ``enc_dimension``-channel encoder
        features; it emits one mask per speaker (enc_dim * num_spk channels)."""
        return TCN(
            enc_dimension,
            self.enc_dim * self.num_spk,
            self.feature_dimension,
            self.feature_dimension * 4,
        )

    def pad_signal(self, input):
        """Zero-pad ``input`` so the encoder's sliding window tiles it exactly.

        Accepts (B, T) or (B, 1, T). Returns the padded (B, 1, T') tensor and
        ``rest``, the number of tail zeros added before the stride-sized guard
        padding (needed to crop the decoder output in ``forward``).

        Raises:
            RuntimeError: if ``input`` is not 2- or 3-dimensional.
        """
        if input.dim() not in [2, 3]:
            raise RuntimeError("Input can only be 2 or 3 dimensional.")

        if input.dim() == 2:
            input = input.unsqueeze(1)
        batch_size = input.size(0)
        nsample = input.size(2)

        # Tail padding so (stride + nsample) is a multiple of win.
        rest = self.win - (self.stride + nsample % self.win) % self.win
        if rest > 0:
            pad = torch.zeros(
                batch_size, 1, rest, dtype=input.dtype, device=input.device
            )
            input = torch.cat([input, pad], 2)
        # Half-window guard padding on both ends.
        pad_aux = torch.zeros(
            batch_size, 1, self.stride, dtype=input.dtype, device=input.device
        )
        input = torch.cat([pad_aux, input, pad_aux], 2)

        return input, rest

    def forward(self, input):
        """Separate ``input`` (B, T) or (B, 1, T) into (B, num_spk, T)."""
        input, rest = self.pad_signal(input)
        batch_size = input.size(0)

        if self.debugger:
            print(f"input shape {input.shape}")

        enc_output = self.encoder(input)
        if self.debugger:
            print(f"enc output shape {enc_output.shape}")

        # Estimate a sigmoid mask per speaker and apply it to the encoding.
        output = torch.sigmoid(self.seperator(enc_output))
        masks = output.view(batch_size, self.num_spk, self.enc_dim, -1)
        masked_output = enc_output.unsqueeze(1) * masks

        # Decode each speaker, then crop the padding added by pad_signal.
        output = self.decoder(
            masked_output.view(batch_size * self.num_spk, self.enc_dim, -1)
        )
        output = output[:, :, self.stride : -(rest + self.stride)].contiguous()
        output = output.view(batch_size, self.num_spk, -1)  # B, C, T

        return output


if __name__ == "__main__":
    m = ConvTasNet(512)
    input = torch.randn(2, 1, 32000)

    output = m(input)
    x = torch.randn(20, 1, 32000)
    y = torch.randn(20, 2, 32000)

    print(x[0].shape)

    pass
