import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class ConvNorm(torch.nn.Module):
    """1-D convolution with Xavier-uniform weight initialization.

    Args:
        in_channels, out_channels, kernel_size, stride, padding, dilation,
            bias: forwarded unchanged to ``torch.nn.Conv1d``.
        padding: if ``None``, "same" padding for the given dilation is
            inferred, which requires an odd ``kernel_size``.
        w_init_gain: nonlinearity name passed to
            ``torch.nn.init.calculate_gain`` to scale the Xavier init.

    Raises:
        ValueError: if ``padding`` is ``None`` and ``kernel_size`` is even.
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, dilation=1, bias=True, w_init_gain='linear'):
        super(ConvNorm, self).__init__()
        if padding is None:
            # Raise instead of assert so the check survives `python -O`.
            if kernel_size % 2 != 1:
                raise ValueError(
                    'kernel_size must be odd to infer "same" padding')
            # Integer arithmetic throughout; same value as int(d*(k-1)/2).
            padding = dilation * (kernel_size - 1) // 2

        self.conv = torch.nn.Conv1d(in_channels, out_channels,
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation,
                                    bias=bias)

        torch.nn.init.xavier_uniform_(
            self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, signal):
        """Apply the convolution; ``signal`` is B x C_in x T."""
        conv_signal = self.conv(signal)
        return conv_signal


class Encoder_lf0(nn.Module):
    """Pitch (lf0) contour encoder.

    With ``typ == 'no_emb'`` the contour is passed through untouched, apart
    from adding a trailing feature axis to 2-D inputs. Any other ``typ``
    builds a 3-layer conv stack (last layer stride 2, halving the time axis)
    followed by a bidirectional LSTM producing 64-dim features.
    """

    def __init__(self, typ='no_emb'):
        super(Encoder_lf0, self).__init__()
        self.type = typ
        if typ == 'no_emb':
            return  # identity mode: no parameters needed
        stack = []
        for idx in range(3):
            in_ch = 1 if idx == 0 else 256
            stride = 2 if idx == 2 else 1  # only the last block downsamples
            stack.append(nn.Sequential(
                ConvNorm(in_ch, 256,
                         kernel_size=5, stride=stride,
                         padding=2,
                         dilation=1, w_init_gain='relu'),
                nn.GroupNorm(256 // 16, 256),
                nn.ReLU()))
        self.convolutions = nn.ModuleList(stack)
        self.lstm = nn.LSTM(256, 32, 1, batch_first=True, bidirectional=True)

    def forward(self, lf0):
        if self.type == 'no_emb':
            # Add a feature axis so downstream code sees B x T x 1.
            return lf0.unsqueeze(-1) if lf0.dim() == 2 else lf0
        if lf0.dim() == 2:
            lf0 = lf0.unsqueeze(1)      # B x 1 x T
        for block in self.convolutions:
            lf0 = block(lf0)            # B x 256 x T (time halved by last block)
        lf0 = lf0.transpose(1, 2)       # B x T/2 x 256
        self.lstm.flatten_parameters()
        out, _ = self.lstm(lf0)         # B x T/2 x 64 (2 x 32 bidirectional)
        return out



def pad_layer(inp, layer, pad_type='reflect'):
    """Apply ``layer`` to ``inp`` after "same"-length padding on the time axis.

    Odd kernels pad symmetrically; even kernels get one fewer element on the
    right, so a stride-1 conv preserves the input length either way.
    """
    k = layer.kernel_size[0]
    left = k // 2
    right = left - 1 if k % 2 == 0 else left
    padded = F.pad(inp, pad=(left, right), mode=pad_type)
    return layer(padded)

def conv_bank(x, module_list, act, pad_type='reflect'):
    """Run ``x`` through every conv in the bank and concat along channels.

    The raw input is appended as the last group, so the output channel count
    is the sum of the bank's output channels plus ``x``'s channels.
    """
    banked = [act(pad_layer(x, conv, pad_type)) for conv in module_list]
    banked.append(x)
    return torch.cat(banked, dim=1)

def get_act(act):
    """Map an activation name to a module; unknown names fall back to ReLU."""
    return nn.LeakyReLU() if act == 'lrelu' else nn.ReLU()


class SpeakerEncoder(nn.Module):
    """Speaker embedding projector: a single 512 -> 256 linear map.

    Signature kept from the AdaIN-VC speaker encoder
    (https://github.com/jjery2243542/adaptive_voice_conversion/blob/master/model.py).

    NOTE(review): every constructor argument is accepted purely for
    signature compatibility and is currently ignored — the convolutional
    architecture they describe is not built here.
    """

    def __init__(self, c_in=80, c_h=128, c_out=256, kernel_size=5,
            bank_size=8, bank_scale=1, c_bank=128, 
            n_conv_blocks=6, n_dense_blocks=6, 
            subsample=[1, 2, 1, 2, 1, 2], act='relu', dropout_rate=0):
        super(SpeakerEncoder, self).__init__()
        self.encoder = nn.Linear(512, 256)

    def forward(self, x):
        # Projects the last axis from 512 to 256.
        # NOTE(review): original comment says x is B,F,T — the last dim
        # must be 512 for this Linear to apply; confirm against caller.
        return self.encoder(x)


class Encoder(nn.Module):
    """Content encoder: 1024-dim HuBERT features -> 64-dim sequence.

    reference from: https://github.com/bshall/VectorQuantizedCPC/blob/master/model.py

    NOTE(review): the constructor arguments mirror the reference
    implementation but are unused here; the encoder is a single conv.
    """

    def __init__(self, in_channels, channels, n_embeddings, z_dim, c_dim):
        super(Encoder, self).__init__()
        # kernel 3, stride 1, padding 1 -> time axis preserved.
        self.encoder = nn.Conv1d(1024, 64, 3, 1, 1, bias=False)

    def forward(self, Hubert_vec):
        # Input: B x 1024 x T; output: B x T x 64.
        z = self.encoder(Hubert_vec)
        return z.permute(0, 2, 1)

class Encoder_BN(nn.Module):
    """Fuse content features with a speaker embedding into a 64-dim bottleneck.

    reference from: https://github.com/bshall/VectorQuantizedCPC/blob/master/model.py
    """

    def __init__(self):
        super(Encoder_BN, self).__init__()
        # 64-dim content + 256-dim speaker embedding -> 64-dim bottleneck.
        self.encoder = nn.Conv1d(256 + 64, 64, 3, 1, 1, bias=False)

    def forward(self, z, spk_embs):
        # z: B x T x 64; spk_embs: B x 256 -> returns B x T x 64.
        # Tile the per-utterance speaker embedding across every time step.
        tiled = spk_embs.unsqueeze(1).expand(-1, z.shape[1], -1)
        fused = torch.cat([z, tiled], dim=-1)   # B x T x 320
        fused = fused.permute(0, 2, 1)          # B x 320 x T for Conv1d
        bottleneck = self.encoder(fused)
        return bottleneck.permute(0, 2, 1)      # back to B x T x 64


