import torch
import torch.nn as nn
import torch.nn.functional as F
import copy

from models.local.PositionalEncoding import PositionalEncoding
from models.local.TFgridnet_v2 import GridNetV2Block



EPS = 1e-8

def select_norm(norm, dim, shape, eps=1e-8):
    """Factory that maps a normalization name to a layer instance.

    "gln" -> GlobalLayerNorm, "cln" -> CumulativeLayerNorm,
    "ln" -> single-group GroupNorm; any other value falls back to
    BatchNorm1d.

    NOTE(review): the BatchNorm1d fallback ignores ``eps`` (it keeps
    torch's 1e-5 default) — looks intentional, confirm before changing.
    """
    if norm == "gln":
        return GlobalLayerNorm(dim, shape, elementwise_affine=True, eps=eps)
    elif norm == "cln":
        return CumulativeLayerNorm(dim, elementwise_affine=True, eps=eps)
    elif norm == "ln":
        return nn.GroupNorm(1, dim, eps=eps)
    return nn.BatchNorm1d(dim)

class FiLM(nn.Module):
    """Feature-wise linear modulation of ``x`` by a conditioning tensor.

    Two pointwise (1x1) convolutions project the conditioning input into a
    multiplicative and an additive term: ``out = x * scale + shift``.
    """

    def __init__(self, size=256):
        super(FiLM, self).__init__()
        # Pointwise projections producing the scale and shift terms.
        self.linear1 = nn.Conv2d(size, size, 1)
        self.linear2 = nn.Conv2d(size, size, 1)

    def forward(self, x, aux):
        scale = self.linear1(aux)
        shift = self.linear2(aux)
        return x * scale + shift


class Tar_Model(nn.Module):
    """Target-speaker extraction model operating on complex STFT features.

    A mixture waveform and an enrollment (auxiliary) waveform are mapped to
    stacked real/imag spectrogram channels, embedded by a shared Conv2d
    front-end, fused with an attention module, refined by a stack of
    GridNetV2 blocks, and decoded back to a time-domain estimate.

    Args:
        stft: callable whose result's last element (``[-1]``) is a complex
            spectrogram tensor.
        istft: callable mapping ``(real, imag)`` back to a waveform; invoked
            with ``input_type="real_imag"``.
        real_att: attention module fusing enrollment features into the
            mixture features, called as ``real_att(mix, aux)``.
        n_freqs: number of frequency bins fed to each GridNetV2 block.
        hidden_channels: hidden size inside each GridNetV2 block.
        n_head: number of attention heads in each GridNetV2 block.
        emb_dim: channel width of the Conv2d front-end embedding.
        emb_ks: kernel size forwarded to GridNetV2Block.
        emb_hs: hop size forwarded to GridNetV2Block.
        d_model: kept for interface compatibility; not used by this module.
        num_layers: number of stacked GridNetV2 blocks.
        norm: kept for interface compatibility; not used by this module.
        num_spks: stored on the module but not read in ``forward``.
        eps: epsilon for the front-end GroupNorm.
    """

    def __init__(
        self,
        stft,
        istft,
        real_att,
        n_freqs,
        hidden_channels,
        n_head,
        emb_dim,
        emb_ks,
        emb_hs,
        d_model,
        num_layers=2,
        norm="ln",
        num_spks=1,
        eps=1e-5,
    ):
        super(Tar_Model, self).__init__()
        self.num_spks = num_spks
        self.num_layers = num_layers

        self.stft = stft
        self.istft = istft
        self.att = real_att

        # Time kernel 3, freq kernel 3; padding preserves the (T, F) shape.
        t_ksize = 3
        ks, padding = (t_ksize, 3), (t_ksize // 2, 1)
        # Shared front-end: 2 channels (real, imag) -> emb_dim channels.
        self.conv = nn.Sequential(
            nn.Conv2d(2, emb_dim, ks, padding=padding),
            nn.GroupNorm(1, emb_dim, eps=eps),
        )
        # Decoder: fused 2*emb_dim channels back to 2 (real, imag) planes.
        self.se_deconv = nn.ConvTranspose2d(2 * emb_dim, 2, ks, padding=padding)

        # One independently-initialized GridNetV2 block per layer.  Each block
        # is constructed fresh, which makes the previous deepcopy redundant.
        self.dual_mdl = nn.ModuleList(
            GridNetV2Block(
                2 * emb_dim,
                emb_ks,
                emb_hs,
                n_freqs,
                hidden_channels,
                n_head,
                approx_qk_dim=512,
                activation="prelu",
            )
            for _ in range(num_layers)
        )

    def forward(self, input, aux):
        """Extract the target speaker from ``input`` conditioned on ``aux``.

        Args:
            input: mixture waveform; assumed ``[B, L]`` (unsqueezed to
                ``[B, 1, L]`` internally) — TODO confirm against callers.
            aux: enrollment waveform of the target speaker, same layout.

        Returns:
            Estimated target-speaker waveform, squeezed back to the input's
            batch layout.
        """
        input = input.unsqueeze(1)
        aux = aux.unsqueeze(1)

        # Scale-normalize both signals; the mixture's std is kept so the
        # estimate can be rescaled to the original level at the end.
        std = input.std(dim=(1, 2), keepdim=True)
        input = input / std
        mix_c = self.stft(input)[-1]
        aux_c = self.stft(aux / aux.std(dim=(1, 2), keepdim=True))[-1]

        # Stack real/imag as channels, then swap the last two axes so the
        # layout matches the Conv2d front-end.
        mix_ri = torch.cat([mix_c.real, mix_c.imag], dim=1)
        mix_ri = mix_ri.permute(0, 1, 3, 2).contiguous()
        aux_ri = torch.cat([aux_c.real, aux_c.imag], dim=1)
        aux_ri = aux_ri.permute(0, 1, 3, 2).contiguous()

        # Shared embedding for mixture and enrollment.
        mix_ri = self.conv(mix_ri)
        aux_ri = self.conv(aux_ri)

        # Fuse enrollment features into the mixture representation, then
        # concatenate both streams along the channel axis.
        aux_ri = self.att(mix_ri, aux_ri)
        x = torch.cat([mix_ri, aux_ri], dim=1)

        for block in self.dual_mdl:
            x = block(x)

        # Decode to real/imag planes and synthesize the waveform.
        x_se = self.se_deconv(x)
        out_r = x_se[:, 0, :, :].permute(0, 2, 1).contiguous()
        out_i = x_se[:, 1, :, :].permute(0, 2, 1).contiguous()

        est_source = self.istft((out_r, out_i), input_type="real_imag").unsqueeze(1)

        # Undo the input normalization.
        est_source = est_source * std

        return est_source.squeeze(1)