import torch.nn as nn
import torch
import numpy as np

# GLU (Gated Linear Unit): element-wise attention over the input signal.
class GLU(nn.Module):
    """Gated Linear Unit taking signal and gate as two separate tensors.

    Unlike ``nn.GLU``, which splits one tensor along a dimension, this
    variant receives the signal ``x`` and the gate pre-activation
    ``gated_x`` explicitly and returns ``x * sigmoid(gated_x)``.
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, x, gated_x):
        # Squash the gate into (0, 1) and use it to scale the signal.
        gate = torch.sigmoid(gated_x)
        return x * gate


class downSample(nn.Module):
    """Gated 2D-convolution down-sampling block.

    Two parallel Conv2d + InstanceNorm2d branches are applied to the same
    input: one produces the signal, the other the gate, and the output is
    their GLU combination ``signal * sigmoid(gate)``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample, self).__init__()

        conv_kwargs = dict(in_channels=in_channels,
                           out_channels=out_channels,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding)
        # Signal branch.
        self.convLayer = nn.Sequential(
            nn.Conv2d(**conv_kwargs),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))
        # Gate branch: same architecture, independent weights.
        self.convLayer_gates = nn.Sequential(
            nn.Conv2d(**conv_kwargs),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))

        self.glu_layer = GLU()

    def forward(self, x):
        # GLU: signal modulated by the sigmoid of the gate branch.
        signal = self.convLayer(x)
        gate = self.convLayer_gates(x)
        return self.glu_layer(signal, gate)
        
        
class upSample(nn.Module):
    """Gated 2D transposed-convolution up-sampling block.

    Mirror of ``downSample``: two parallel ConvTranspose2d + InstanceNorm2d
    branches feed a GLU, yielding ``signal * sigmoid(gate)``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(upSample, self).__init__()

        deconv_kwargs = dict(in_channels=in_channels,
                             out_channels=out_channels,
                             kernel_size=kernel_size,
                             stride=stride,
                             padding=padding)
        # Signal branch.
        self.convLayer = nn.Sequential(
            nn.ConvTranspose2d(**deconv_kwargs),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))
        # Gate branch: same architecture, independent weights.
        self.convLayer_gates = nn.Sequential(
            nn.ConvTranspose2d(**deconv_kwargs),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))

        self.glu_layer = GLU()

    def forward(self, x):
        # GLU: signal modulated by the sigmoid of the gate branch.
        signal = self.convLayer(x)
        gate = self.convLayer_gates(x)
        return self.glu_layer(signal, gate)
        
        
class Generator(nn.Module):
    """Speaker-conditioned encoder/decoder generator.

    The one-hot speaker label ``lab`` is tiled over the spatial grid and
    concatenated onto the feature maps before every up-sampling stage and
    before the final transposed convolution.
    """

    def __init__(self, n_spk):
        super(Generator, self).__init__()
        self.n_spk = n_spk

        # Encoder: [B,1,36,T] spectrogram -> [B,5,1,T/4] bottleneck.
        self.downSample_1 = downSample(in_channels=1,  # single-channel input feature map
                                       out_channels=32,
                                       kernel_size=(3, 9),
                                       stride=(1, 1),
                                       padding=(1, 4))
        self.downSample_2 = downSample(32, 64, (4, 8), (2, 2), (1, 3))
        self.downSample_3 = downSample(64, 128, (4, 8), (2, 2), (1, 3))
        self.downSample_4 = downSample(128, 64, (3, 5), (1, 1), (1, 2))
        self.downSample_5 = downSample(64, 5, (9, 5), (9, 1), (4, 2))

        # Decoder: each stage consumes n_spk extra channels carrying the
        # tiled speaker code.
        self.upSample_1 = upSample(in_channels=5 + self.n_spk,
                                   out_channels=64,
                                   kernel_size=(9, 5),
                                   stride=(9, 1),
                                   padding=(0, 2))
        self.upSample_2 = upSample(64 + self.n_spk, 128, (3, 5), (1, 1), (1, 2))
        self.upSample_3 = upSample(128 + self.n_spk, 64, (4, 8), (2, 2), (1, 3))
        self.upSample_4 = upSample(64 + self.n_spk, 32, (4, 8), (2, 2), (1, 3))

        # Final projection back to a single-channel spectrogram.
        self.deCNN = nn.ConvTranspose2d(in_channels=32 + self.n_spk,
                                        out_channels=1,
                                        kernel_size=(3, 9),
                                        stride=(1, 1),
                                        padding=(1, 4))

    @staticmethod
    def _append_label(feat, c):
        """Tile speaker code c over feat's spatial grid and concat on channels."""
        tiled = c.repeat(1, 1, feat.size(2), feat.size(3))
        return torch.cat([feat, tiled], dim=1)

    def forward(self, input, lab):
        # Speaker one-hot reshaped for spatial tiling: [B, n_spk, 1, 1]
        c = lab.view(lab.size(0), lab.size(1), 1, 1)

        # Encoder (shape comments assume a [B,1,36,T] input with T % 4 == 0)
        h = self.downSample_1(input)  # [B,32,36,T]
        h = self.downSample_2(h)      # [B,64,18,T/2]
        h = self.downSample_3(h)      # [B,128,9,T/4]
        h = self.downSample_4(h)      # [B,64,9,T/4]
        h = self.downSample_5(h)      # [B,5,1,T/4]

        # Decoder: speaker code injected before every stage.
        h = self.upSample_1(self._append_label(h, c))  # [B,64,9,T/4]
        h = self.upSample_2(self._append_label(h, c))  # [B,128,9,T/4]
        h = self.upSample_3(self._append_label(h, c))  # [B,64,18,T/2]
        h = self.upSample_4(self._append_label(h, c))  # [B,32,36,T]
        return self.deCNN(self._append_label(h, c))    # [B,1,36,T]


class Discriminator(nn.Module):
    """Speaker-conditioned discriminator.

    The tiled speaker code is concatenated onto the input of every layer;
    the final plain convolution collapses the frequency axis and a sigmoid
    yields a per-patch real/fake probability map.
    """

    def __init__(self, n_spk):
        super(Discriminator, self).__init__()

        self.n_spk = n_spk

        self.downSample_1 = downSample(in_channels=1 + self.n_spk,
                                       out_channels=32,
                                       kernel_size=(3, 9),
                                       stride=(1, 1),
                                       padding=(1, 4))
        self.downSample_2 = downSample(32 + self.n_spk, 32, (3, 8), (1, 2), (1, 3))
        self.downSample_3 = downSample(32 + self.n_spk, 32, (3, 8), (1, 2), (1, 3))
        self.downSample_4 = downSample(32 + self.n_spk, 32, (3, 6), (1, 2), (1, 2))
        # Last stage is an ungated convolution producing a single channel.
        self.downSample_5 = nn.Conv2d(32 + self.n_spk, 1, (36, 5), (36, 1), (0, 2))

    def forward(self, input, lab):
        # Speaker one-hot reshaped for spatial tiling: [B, n_spk, 1, 1]
        c = lab.view(lab.size(0), lab.size(1), 1, 1)

        # Every layer sees its input augmented with the tiled speaker code.
        h = input
        for layer in (self.downSample_1, self.downSample_2,
                      self.downSample_3, self.downSample_4,
                      self.downSample_5):
            tiled = c.repeat(1, 1, h.size(2), h.size(3))
            h = layer(torch.cat([h, tiled], dim=1))

        # Map raw scores to (0, 1).
        return torch.sigmoid(h)
        
        
class DomainClassifier(nn.Module):
    """Predicts which of the ``n_spk`` speakers produced an input feature map.

    Only the first 8 frequency bins of the input are used. The forward pass
    returns the pooled, flattened scores without the log-softmax (it is kept
    as a module for compatibility but intentionally not applied).
    """

    def __init__(self, n_spk):
        super(DomainClassifier, self).__init__()

        self.n_spk = n_spk

        self.downSample_1 = downSample(in_channels=1,  # single-channel input feature map
                                       out_channels=8,
                                       kernel_size=(4, 4),
                                       stride=(2, 2),
                                       padding=(1, 1))
        self.downSample_2 = downSample(8, 16, (4, 4), (2, 2), (1, 1))
        self.downSample_3 = downSample(16, 32, (2, 4), (2, 2), (0, 1))
        self.downSample_4 = downSample(32, 16, (1, 4), (1, 2), (0, 1))
        # Ungated convolution producing one channel per speaker.
        self.downSample_5 = nn.Conv2d(16, n_spk, (1, 4), (1, 2), (0, 1))
        self.avg_pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.softmax_out = nn.LogSoftmax(dim=1)  # defined but not used in forward

    def forward(self, input):
        # Only the lowest 8 frequency bins are fed to the classifier
        # (presumably the most speaker-discriminative band — TODO confirm).
        h = input[:, :, 0:8, :]
        for layer in (self.downSample_1, self.downSample_2,
                      self.downSample_3, self.downSample_4,
                      self.downSample_5):
            h = layer(h)
        h = self.avg_pooling(h)
        # Flatten to [B, n_spk]; log-softmax intentionally skipped.
        return h.view(h.size(0), -1)
        
        
        
        
        
        
        



        
        
        
        
        
        
        
        
        
        
        
        
if __name__ == "__main__":
    # Smoke-test: run a one-hot labelled batch through the Generator and
    # check the output shape matches the input shape.
    n_spk = 6
    model = Generator(n_spk)

    # BUG FIX: the label must be float32. np.array([[1,0,...]]) defaults to
    # int64, and Generator.forward concatenates the tiled label with float32
    # feature maps via torch.cat, which requires matching dtypes and would
    # raise a RuntimeError for an integer label.
    lab = torch.from_numpy(np.array([[1, 0, 0, 0, 0, 0]], dtype=np.float32))
    print(lab.shape)

    # [B, C, n_mels, T] with T divisible by 4 (two stride-2 stages).
    data = torch.randn(1, 1, 36, 1152)

    out = model(data, lab)
    print(out.shape)
    
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        
        


