import torch
import torch.nn as nn
import torch.nn.functional as F

import logging

# Configure root logging once at import time and expose a module-level logger.
# NOTE(review): calling basicConfig at import time affects the whole process —
# confirm this is intended when the module is imported by a larger application.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

from cnn import TCNBlock_spk
from cnn import ResNet
from cnn import TCNStack


class Spex(nn.Module):
    """SpEx-style time-domain speaker extraction network.

    A multi-scale encoder (three 1-D convolutions with window sizes
    ``L1 < L2 < L3``, all sharing stride ``L1 // 2`` so their outputs are
    frame-aligned) feeds two branches:

    * a speaker encoder (ResNet stack) that turns the reference utterance
      into frame-level speaker embeddings, mean-pooled to one vector, and
    * a speech extractor (four TCN stacks whose first block is conditioned
      on the speaker embedding) that predicts one mask per encoder scale.

    Masked encoder outputs are decoded back to waveforms with matching
    transposed convolutions; the pooled speaker embedding is additionally
    classified over ``num_spks`` speakers for the auxiliary CE loss.
    """

    def __init__(
        self,
        L1=20,
        L2=80,
        L3=160,
        N=256,
        O=256,
        P=512,
        spk_embed_dim=256,
        Q=3,
        B=8,
        num_spks=101,
    ):
        """
        Args:
            L1: short encoder window length (samples); stride is ``L1 // 2``.
            L2: middle encoder window length (samples).
            L3: long encoder window length (samples).
            N: encoder output channels per scale.
            O: TCN bottleneck channels.
            P: TCN / ResNet hidden channels.
            spk_embed_dim: speaker embedding dimension.
            Q: TCN kernel size.
            B: number of TCN blocks per stack (the first, speaker-conditioned
               block of each stack is created separately).
            num_spks: number of training speakers for the classifier head.
        """
        super().__init__()
        self.L1 = L1
        self.L2 = L2
        self.L3 = L3
        # All three encoders share stride L1 // 2 so their frame counts can
        # be aligned (with right zero-padding) and concatenated channel-wise.
        self.encoder1 = nn.Conv1d(1, N, L1, stride=self.L1 // 2, padding=0)
        self.encoder2 = nn.Conv1d(1, N, L2, stride=self.L1 // 2, padding=0)
        self.encoder3 = nn.Conv1d(1, N, L3, stride=self.L1 // 2, padding=0)

        ## TODO: apply gLN instead of cLN
        self.speaker_encoder = nn.Sequential(
            nn.GroupNorm(1, 3 * N, 1e-8),
            nn.Conv1d(3 * N, O, 1),
            ResNet(O, O),
            ResNet(O, P),
            ResNet(P, P),
            nn.Conv1d(P, spk_embed_dim, 1),
        )

        ## speech extractor: norm + 1x1 bottleneck, then four TCN stacks
        self.y_norm = nn.GroupNorm(1, 3 * N, 1e-8)
        self.y_proj = nn.Conv1d(3 * N, O, 1)
        ## TODO: causal = False
        self.conv_1_first = TCNBlock_spk(O, spk_embed_dim, P, Q, 1, False)
        self.conv_1_other = TCNStack.build_TCNStack(B, O, P, Q, False)
        self.conv_2_first = TCNBlock_spk(O, spk_embed_dim, P, Q, 1, False)
        self.conv_2_other = TCNStack.build_TCNStack(B, O, P, Q, False)
        self.conv_3_first = TCNBlock_spk(O, spk_embed_dim, P, Q, 1, False)
        self.conv_3_other = TCNStack.build_TCNStack(B, O, P, Q, False)
        self.conv_4_first = TCNBlock_spk(O, spk_embed_dim, P, Q, 1, False)
        self.conv_4_other = TCNStack.build_TCNStack(B, O, P, Q, False)
        # One mask head per encoder scale.
        self.mask1 = nn.Conv1d(O, N, 1)
        self.mask2 = nn.Conv1d(O, N, 1)
        self.mask3 = nn.Conv1d(O, N, 1)

        # Transposed convolutions mirroring the three encoders.
        self.decoder_1d_short = nn.ConvTranspose1d(N, 1, L1, L1 // 2, bias=True)
        self.decoder_1d_middle = nn.ConvTranspose1d(N, 1, L2, L1 // 2, bias=True)
        self.decoder_1d_long = nn.ConvTranspose1d(N, 1, L3, L1 // 2, bias=True)

        # Speaker classifier head for the auxiliary cross-entropy loss.
        self.linear = nn.Linear(spk_embed_dim, num_spks)

    def _multi_scale_encode(self, signal):
        """Encode ``signal`` at the three window scales.

        The input is right-padded with zeros so that ``encoder2`` and
        ``encoder3`` produce exactly as many frames ``T`` as ``encoder1``.

        Args:
            signal: waveform tensor, (batch, 1, samples).

        Returns:
            Tuple ``(w1, w2, w3)`` of ReLU-activated encoder outputs, each
            shaped (batch, N, T).
        """
        w1 = F.relu(self.encoder1(signal))
        T = w1.shape[-1]
        in_len = signal.shape[-1]
        stride = self.L1 // 2
        # Input lengths required for encoder2/3 to also yield T frames.
        len2 = (T - 1) * stride + self.L2
        len3 = (T - 1) * stride + self.L3
        w2 = F.relu(self.encoder2(F.pad(signal, (0, len2 - in_len), "constant", 0)))
        w3 = F.relu(self.encoder3(F.pad(signal, (0, len3 - in_len), "constant", 0)))
        return w1, w2, w3

    def forward(self, x, aux):
        """Extract the target speaker's waveform from the mixture.

        Args:
            x: mixture waveform, (batch, 1, samples).
            aux: reference utterance of the target speaker, (batch, 1, samples).

        Returns:
            Tuple ``(s1, s2, s3, spk_probs)``: reconstructions from the
            short/middle/long decoders (truncated to the mixture length)
            and softmax speaker posteriors over ``num_spks`` classes.
        """
        ### mixture encoder
        # FIX: w1 and aux_w1 both get ReLU now; the original skipped the
        # activation on aux_w1 only, unlike every other encoder path.
        w1, w2, w3 = self._multi_scale_encode(x)
        xlen1 = x.shape[-1]
        ws = torch.concat([w1, w2, w3], 1)  # (batch, 3N, T)

        ### speaker encoder on the reference utterance
        aux_w1, aux_w2, aux_w3 = self._multi_scale_encode(aux)
        aux_ws = torch.concat([aux_w1, aux_w2, aux_w3], 1)
        aux_emb = self.speaker_encoder(aux_ws)
        # Temporal average-pool frame-level embeddings to a single vector,
        # kept 3-D (batch, spk_embed_dim, 1) for the TCN conditioning.
        aux_vec = aux_emb.mean(-1, keepdim=True)

        ### speech extractor
        y = self.y_norm(ws)
        # FIX: project the *normalized* features. The original called
        # self.y_proj(ws), silently discarding the y_norm output.
        y = self.y_proj(y)
        y = self.conv_1_other(self.conv_1_first(y, aux_vec))
        y = self.conv_2_other(self.conv_2_first(y, aux_vec))
        y = self.conv_3_other(self.conv_3_first(y, aux_vec))
        y = self.conv_4_other(self.conv_4_first(y, aux_vec))

        # Scale-specific masks applied to the matching encoder outputs.
        S1 = w1 * F.relu(self.mask1(y))
        S2 = w2 * F.relu(self.mask2(y))
        S3 = w3 * F.relu(self.mask3(y))
        return (
            # Truncation to xlen1 is a no-op for the short decoder (its
            # output never exceeds the input length) but kept for symmetry.
            self.decoder_1d_short(S1)[:, :, :xlen1],
            self.decoder_1d_middle(S2)[:, :, :xlen1],
            self.decoder_1d_long(S3)[:, :, :xlen1],
            F.softmax(self.linear(aux_vec.squeeze(-1)), dim=1),
        )


if __name__ == "__main__":
    # Smoke test: run a forward pass on random mixture/reference pairs.
    model = Spex()
    mixture = torch.randn(3, 1, 30000)
    reference = torch.randn(3, 1, 1000)
    s1, s2, s3, soft = model(mixture, reference)
    print(f"shape of s1, s2, s3 is {s1.shape}, {s2.shape}, {s3.shape}, {soft.shape}")
