import torch.nn as nn
import torch
import numpy as np

# GLU  门控线性单元Gated linear units
# 模块 对输入信号进行注意力选择
class GLU(nn.Module):
    """Gated Linear Unit: attention-style selection of the input signal.

    Returns ``x * sigmoid(gated_x)`` — the gate branch squashes to (0, 1)
    and element-wise scales the signal branch.
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, x, gated_x):
        gate = torch.sigmoid(gated_x)
        return x * gate
        


# 类似 反卷积的操作
# 输入特征 [B,C,D]
# 输出特征 [B,C/scale,D*scale]
# 通过对数据展开再重新排列的过程
# 降低通道的维度，增加特征的维度
# 可以用反卷积操作替换
class PixelShuffle(nn.Module):
    """Deconvolution-like 1D channel-to-width rearrangement.

    Reshapes ``[B, C, W] -> [B, C // upscale_factor, W * upscale_factor]``
    by unfolding groups of channels into the width axis (each output row is
    the concatenation of its source channels, in channel order). Lowers the
    channel dimension while widening the feature dimension; could be replaced
    by a transposed convolution.
    """

    def __init__(self, upscale_factor):
        super(PixelShuffle, self).__init__()
        self.upscale_factor = upscale_factor

    def forward(self, x):
        # Bug fix: the original hard-coded a factor of 2 and silently ignored
        # self.upscale_factor; honor the configured factor instead.
        n = x.shape[0]
        c_out = x.shape[1] // self.upscale_factor
        w_new = x.shape[2] * self.upscale_factor
        return x.view(n, c_out, w_new)
        
        
# 残差连接层
class ResidualLayer(nn.Module):
    """1D residual block with a GLU-gated bottleneck.

    x -> (Conv1d+IN signal branch, Conv1d+IN gate branch) -> GLU
      -> Conv1d+IN projection -> + x
    Channel path: in_channels -> mid_channels -> in_channels.
    """

    def __init__(self, in_channels, mid_channels, kernel_size, stride, padding):
        super(ResidualLayer, self).__init__()

        def conv_in(c_in, c_out):
            # Conv1d followed by affine instance norm. Stride is pinned to 1
            # so the residual addition sees matching temporal lengths.
            # NOTE(review): the `stride` ctor argument is accepted but unused.
            return nn.Sequential(
                nn.Conv1d(in_channels=c_in,
                          out_channels=c_out,
                          kernel_size=kernel_size,
                          stride=1,
                          padding=padding),
                nn.InstanceNorm1d(num_features=c_out, affine=True))

        # Construction order matches the original so parameter init and
        # state_dict keys are identical.
        self.conv1d_layer = conv_in(in_channels, mid_channels)
        self.conv_layer_gates = conv_in(in_channels, mid_channels)
        self.conv1d_out_layer = conv_in(mid_channels, in_channels)
        self.glu_layer = GLU()

    def forward(self, x):
        signal = self.conv1d_layer(x)
        gates = self.conv_layer_gates(x)

        # GLU gating: in_channels -> mid_channels
        gated = self.glu_layer(signal, gates)

        # Project back: mid_channels -> in_channels, then skip-connect.
        projected = self.conv1d_out_layer(gated)
        return x + projected


class downSample_Generator(nn.Module):
    """Generator down-sampling block.

    Two parallel Conv2d + InstanceNorm branches (signal and gate) merged by
    a GLU; spatial reduction comes from the configured stride.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Generator, self).__init__()

        def branch():
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.InstanceNorm2d(num_features=out_channels, affine=True))

        self.convLayer = branch()
        self.convLayer_gates = branch()
        self.glu_layer = GLU()

    def forward(self, x):
        # GLU: gate the signal branch with the sigmoid of the gate branch.
        signal = self.convLayer(x)
        gate = self.convLayer_gates(x)
        return self.glu_layer(signal, gate)


class upSample_Generator(nn.Module):
    """Generator up-sampling block.

    Each branch runs Conv2d -> nn.PixelShuffle(2) -> InstanceNorm; the two
    branches (signal and gate) are merged by a GLU. PixelShuffle(2) maps
    [B, C, H, W] -> [B, C/4, 2H, 2W], hence the norm sees out_channels // 4
    features.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(upSample_Generator, self).__init__()

        def branch():
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.PixelShuffle(upscale_factor=2),
                nn.InstanceNorm2d(num_features=out_channels // 4,
                                  affine=True))

        self.convLayer = branch()
        self.convLayer_gate = branch()
        self.glu_layer = GLU()

    def forward(self, x):
        return self.glu_layer(self.convLayer(x), self.convLayer_gate(x))
                                       
   
class Generator(nn.Module):
    """CycleGAN-VC style generator over [B, 36, T] acoustic features.

    Pipeline:
      gated 2D conv -> two 2D down-samples -> flatten to 1D channels
      (2304 = 256 * 9) -> six residual blocks -> unflatten back to 2D
      -> two PixelShuffle up-samples -> final 2D conv to one channel.
    """

    def __init__(self):
        super(Generator, self).__init__()

        # Initial gated 2D convolution (signal + gate branches for a GLU).
        # In channels = 1: the [B, 36, T] input is treated as a 1-channel image.
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=128,
                               kernel_size=(5, 15),
                               stride=(1, 1),
                               padding=(2, 7))
        self.conv1_gates = nn.Conv2d(in_channels=1,
                                     out_channels=128,
                                     kernel_size=(5, 15),
                                     stride=1,
                                     padding=(2, 7))
        self.conv1_glu = GLU()

        # 2D down-sampling: each step halves both spatial dims (stride 2).
        self.downSample1 = downSample_Generator(in_channels=128,
                                                out_channels=256,
                                                kernel_size=5,
                                                stride=2,
                                                padding=2)
        self.downSample2 = downSample_Generator(in_channels=256,
                                                out_channels=256,
                                                kernel_size=5,
                                                stride=2,
                                                padding=2)

        # 2D -> 1D projection: 2304 (= 256 channels * 9 rows) -> 256.
        self.conv2dto1dLayer = nn.Sequential(
            nn.Conv1d(in_channels=2304,
                      out_channels=256,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            nn.InstanceNorm1d(num_features=256, affine=True))

        # Six identical residual bottleneck blocks (256 -> 512 -> 256).
        # setattr keeps the original attribute names (residualLayer1..6) so
        # state_dict keys are unchanged.
        for i in range(1, 7):
            setattr(self, "residualLayer%d" % i,
                    ResidualLayer(in_channels=256,
                                  mid_channels=512,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1))

        # 1D -> 2D projection: 256 -> 2304 (= 256 channels * 9 rows).
        self.conv1dto2dLayer = nn.Sequential(
            nn.Conv1d(in_channels=256,
                      out_channels=2304,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            nn.InstanceNorm1d(num_features=2304, affine=True))

        # Up-sampling: Conv2d + PixelShuffle(2) doubles both spatial dims
        # and divides channels by 4 (1024 -> 256, then 512 -> 128).
        self.upSample1 = upSample_Generator(in_channels=256,
                                            out_channels=1024,
                                            kernel_size=5,
                                            stride=1,
                                            padding=2)
        self.upSample2 = upSample_Generator(in_channels=256,
                                            out_channels=512,
                                            kernel_size=5,
                                            stride=1,
                                            padding=2)

        # Final convolution back to a single channel.
        self.lastConvLayer = nn.Conv2d(in_channels=128,
                                       out_channels=1,
                                       kernel_size=(5, 15),
                                       stride=(1, 1),
                                       padding=(2, 7))

    def forward(self, input):
        # [B, 36, T] -> add channel axis -> [B, 1, 36, T]
        h = input.unsqueeze(1)

        # Gated first convolution: [B, 128, 36, T]
        h = self.conv1_glu(self.conv1(h), self.conv1_gates(h))

        # Down-sample twice: [B, 256, 18, T/2] -> [B, 256, 9, T/4]
        h = self.downSample2(self.downSample1(h))

        # Flatten the 2D map into 1D channels: [B, 2304, T/4] -> [B, 256, T/4]
        h = h.view(h.size(0), 2304, 1, -1).squeeze(2)
        h = self.conv2dto1dLayer(h)

        # Residual stack; shape stays [B, 256, T/4] throughout.
        for i in range(1, 7):
            h = getattr(self, "residualLayer%d" % i)(h)

        # Expand back to a 2D map: [B, 2304, T/4] -> [B, 256, 9, T/4]
        h = self.conv1dto2dLayer(h)
        h = h.unsqueeze(2).view(h.size(0), 256, 9, -1)

        # Up-sample twice: [B, 256, 18, T/2] -> [B, 128, 36, T]
        h = self.upSample2(self.upSample1(h))

        # [B, 1, 36, T] -> drop the channel axis -> [B, 36, T]
        return self.lastConvLayer(h).squeeze(1)
        

class downSample_Discriminator(nn.Module):
    """Discriminator down-sampling block.

    Two parallel Conv2d + InstanceNorm branches (signal and gate) merged by
    a GLU; spatial reduction comes from the configured stride.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Discriminator, self).__init__()

        def branch():
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.InstanceNorm2d(num_features=out_channels, affine=True))

        self.convLayer = branch()
        self.convLayer_gate = branch()
        self.glu_layer = GLU()

    def forward(self, x):
        signal = self.convLayer(x)
        gate = self.convLayer_gate(x)
        return self.glu_layer(signal, gate)


# 鉴别器  PatchGAN
# PatchGAN discriminator
class Discriminator(nn.Module):
    """PatchGAN discriminator over [B, 36, T] features.

    Gated first convolution, four strided GLU down-sampling blocks, then a
    1x3 convolution producing a per-patch realness map squashed to (0, 1).
    """

    def __init__(self):
        super(Discriminator, self).__init__()

        # First gated convolution (signal + gate branches for a GLU).
        self.convLayer1 = nn.Conv2d(in_channels=1,
                                    out_channels=128,
                                    kernel_size=(3, 3),
                                    stride=(1, 1),
                                    padding=(1, 1))
        self.convLayer1_gate = nn.Conv2d(in_channels=1,
                                         out_channels=128,
                                         kernel_size=(3, 3),
                                         stride=(1, 1),
                                         padding=(1, 1))
        self.glu_convLayer1 = GLU()

        # Down-sampling stack (tuples used uniformly; equivalent to the
        # original's mixed list/tuple arguments).
        self.downSample1 = downSample_Discriminator(in_channels=128,
                                                    out_channels=256,
                                                    kernel_size=(3, 3),
                                                    stride=(2, 2),
                                                    padding=(1, 1))
        self.downSample2 = downSample_Discriminator(in_channels=256,
                                                    out_channels=512,
                                                    kernel_size=(3, 3),
                                                    stride=(2, 2),
                                                    padding=(1, 1))
        self.downSample3 = downSample_Discriminator(in_channels=512,
                                                    out_channels=1024,
                                                    kernel_size=(3, 3),
                                                    stride=(2, 2),
                                                    padding=(1, 1))
        self.downSample4 = downSample_Discriminator(in_channels=1024,
                                                    out_channels=1024,
                                                    kernel_size=(1, 5),
                                                    stride=(1, 1),
                                                    padding=(0, 2))

        # Output convolution: one scalar per patch.
        self.outputConvLayer = nn.Conv2d(in_channels=1024,
                                         out_channels=1,
                                         kernel_size=(1, 3),
                                         stride=(1, 1),
                                         padding=(0, 1))

    def forward(self, input):
        # [B, 36, T] -> [B, 1, 36, T]
        h = input.unsqueeze(1)

        # Gated first convolution: [B, 128, 36, T]
        h = self.glu_convLayer1(self.convLayer1(h), self.convLayer1_gate(h))

        # [B,256,18,T/2] -> [B,512,9,T/4] -> [B,1024,5,T/8] -> [B,1024,5,T/8]
        h = self.downSample1(h)
        h = self.downSample2(h)
        h = self.downSample3(h)
        h = self.downSample4(h)

        # Per-patch score in (0, 1): [B, 1, 5, T/8]
        return torch.sigmoid(self.outputConvLayer(h))
        
if __name__ == "__main__":

    # Smoke test. The generator expects [B, 36, T] (36 acoustic features per
    # frame). The original demo passed randn(2, 128, 36), which transposes
    # the feature/time axes: the model still ran (convs accept any size) but
    # the 2304 = 256 * 9 flattening assumption (36 -> 18 -> 9) was violated.
    in_data = torch.randn(2, 36, 128)
    m_G = Generator()
    out_data = m_G(in_data)
    print(out_data.shape)

    m_D = Discriminator()
    out_D = m_D(out_data)
    print(out_D.shape)
    


        
        
        
        
        