import torch.nn as nn
import torch
from torch.autograd import Variable
from resnet import *
## LSTM-AE model
# concat
class LSTMAEsigm(nn.Module):
    """LSTM autoencoder mapping (audio sequence, blendshape vector) pairs to a
    reconstructed blendshape vector.

    The audio branch is a bidirectional LSTM followed by a linear projection;
    the blendshape branch is a small MLP whose output is squashed to [0, 1] by
    a sigmoid so it can serve as a low-dimensional control variable.  The two
    latents are fused either by concatenation (``is_concat=True``) or by a
    bilinear layer, then decoded back to blendshape space.

    Args:
        num_features: per-frame audio feature dimension (LSTM input size).
        num_audiof: audio latent dimension.
        num_bsf: blendshape latent (control variable) dimension.
        num_blendshapes: number of blendshape coefficients in/out.
        is_concat: fuse latents by concatenation if True, else bilinearly.
    """

    def __init__(self, num_features=39, num_audiof=128, num_bsf=2, num_blendshapes=51, is_concat=True):
        super(LSTMAEsigm, self).__init__()
        self.is_concat = is_concat

        ## encoder
        # Audio branch: single-layer bidirectional LSTM over the frame
        # features.  NOTE: inter-layer dropout only applies when num_layers > 1,
        # so the original dropout=0.5 had no effect and merely raised a PyTorch
        # UserWarning; it is removed here (behavior unchanged).
        self.rnn = nn.LSTM(input_size=num_features, hidden_size=256, num_layers=1,
                        batch_first=True, bidirectional=True)

        # 256 hidden units per direction -> 512-dim bidirectional output.
        self.audio_fc = nn.Linear(256 * 2, num_audiof)

        # Blendshape branch: num_blendshapes -> 24 -> num_bsf, sigmoid keeps
        # the control variable in 0~1.
        self.bs_fc = nn.Sequential(
            nn.Linear(num_blendshapes, 24),
            nn.ReLU(True),

            nn.Linear(24, num_bsf),
            nn.Sigmoid()  # constrain to 0~1, control variable
        )

        ## decoder
        if self.is_concat:
            self.decoder_fc = nn.Sequential(
                nn.Linear(num_audiof + num_bsf, 64),
                nn.ReLU(True),
                nn.Linear(64, num_blendshapes),
                nn.Sigmoid()
            )
        else:
            # Bilinear fusion keeps the fused latent at num_audiof dims.
            self.bilinear = nn.Bilinear(num_bsf, num_audiof, num_audiof)
            self.decoder_fc = nn.Sequential(
                nn.Linear(num_audiof, 64),
                nn.ReLU(True),
                nn.Linear(64, num_blendshapes),
                nn.Sigmoid()
            )

    def fuse(self, audio_z, bs_z):
        """Combine the audio and blendshape latents (concat or bilinear)."""
        if self.is_concat:
            return torch.cat((audio_z, bs_z), dim=1)
        else:
            return self.bilinear(bs_z, audio_z)

    def decode(self, z):
        """Map a fused latent back to blendshape space (values in [0, 1])."""
        return self.decoder_fc(z)

    def decode_audio(self, audio, bs_z):
        """Decode from audio plus a fixed blendshape latent.

        ``bs_z`` is a single latent row that is tiled to the audio batch size,
        so one control vector can drive a whole batch of audio clips.
        """
        audio_rnn, _ = self.rnn(audio)
        # Last time step of the LSTM output serves as the clip embedding.
        audio_z = self.audio_fc(audio_rnn[:, -1, :])
        bs_z = bs_z.repeat(audio_z.size(0), 1)  # tile to batch size

        return self.decode(self.fuse(audio_z, bs_z))

    def forward(self, audio, blendshape):
        """Encode both modalities, fuse, and reconstruct the blendshapes.

        Returns:
            (audio_z, bs_z, output) — the two latents and the reconstruction.
        """
        # encode
        audio_rnn, _ = self.rnn(audio)
        audio_z = self.audio_fc(audio_rnn[:, -1, :])
        bs_z = self.bs_fc(blendshape)

        # decode
        z = self.fuse(audio_z, bs_z)
        output = self.decode(z)

        return audio_z, bs_z, output

class LSTMAEdist(nn.Module):
    """LSTM autoencoder with a variational blendshape branch.

    The audio branch (2-layer bidirectional LSTM + linear projection) yields a
    deterministic latent; the blendshape branch is a VAE-style encoder that
    produces (mu, logvar), sampled via the reparameterization trick during
    training.  Latents are fused by concatenation or a bilinear layer and
    decoded back to blendshape space.

    Args:
        num_features: per-frame audio feature dimension (LSTM input size).
        num_audiof: audio latent dimension.
        num_bsf: blendshape latent dimension.
        num_blendshapes: number of blendshape coefficients in/out.
        is_concat: fuse latents by concatenation if True, else bilinearly.
    """

    def __init__(self, num_features=39, num_audiof=128, num_bsf=2, num_blendshapes=51, is_concat=True):
        super(LSTMAEdist, self).__init__()
        self.is_concat = is_concat

        ## encoder
        # Audio branch: 2-layer bidirectional LSTM with inter-layer dropout.
        self.rnn = nn.LSTM(input_size=num_features, hidden_size=256, num_layers=2,
                        batch_first=True, dropout=0.5, bidirectional=True)

        # 256 hidden units per direction -> 512-dim bidirectional output.
        self.audio_fc = nn.Linear(256 * 2, num_audiof)

        # Blendshape branch: shared trunk, then separate mean / log-variance heads.
        self.bs_fc1 = nn.Sequential(
            nn.Linear(num_blendshapes, 24),
            nn.ReLU(True),
        )
        self.bs_fc21 = nn.Linear(24, num_bsf)  # mu head
        self.bs_fc22 = nn.Linear(24, num_bsf)  # logvar head

        ## decoder
        if self.is_concat:
            self.decoder_fc = nn.Sequential(
                nn.Linear(num_audiof + num_bsf, 64),
                nn.ReLU(True),
                nn.Linear(64, num_blendshapes),
                nn.Sigmoid()
            )
        else:
            # Bilinear fusion keeps the fused latent at num_audiof dims.
            self.bilinear = nn.Bilinear(num_bsf, num_audiof, num_audiof)
            self.decoder_fc = nn.Sequential(
                nn.Linear(num_audiof, 64),
                nn.ReLU(True),
                nn.Linear(64, num_blendshapes),
                nn.Sigmoid()
            )

    def encode(self, audio, blendshape):
        """Return (audio_z, bs_mu, bs_logvar) for the two modalities."""
        audio_rnn, _ = self.rnn(audio)
        # Last time step of the LSTM output serves as the clip embedding.
        audio_z = self.audio_fc(audio_rnn[:, -1, :])

        bs_h1 = self.bs_fc1(blendshape)
        return audio_z, self.bs_fc21(bs_h1), self.bs_fc22(bs_h1)

    def fuse(self, audio_z, bs_z):
        """Combine the audio and blendshape latents (concat or bilinear)."""
        if self.is_concat:
            return torch.cat((audio_z, bs_z), dim=1)
        else:
            return self.bilinear(bs_z, audio_z)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) while training; return mu in eval mode."""
        if self.training:
            std = torch.exp(0.5 * logvar)
            # randn_like inherits std's dtype and device, so this works on CPU
            # and GPU alike.  The previous Variable(torch.randn(...)).cuda()
            # used the deprecated Variable API and crashed on CUDA-less hosts.
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        """Map a fused latent back to blendshape space (values in [0, 1])."""
        return self.decoder_fc(z)

    def decode_audio(self, audio, bs_z):
        """Decode from audio plus a fixed blendshape latent tiled to batch size."""
        audio_rnn, _ = self.rnn(audio)
        audio_z = self.audio_fc(audio_rnn[:, -1, :])
        bs_z = bs_z.repeat(audio_z.size(0), 1)

        return self.decode(self.fuse(audio_z, bs_z))

    def forward(self, audio, blendshape):
        """Encode, sample the blendshape latent, fuse, and reconstruct.

        Returns:
            (audio_z, bs_z, output, bs_mu, bs_logvar)
        """
        # encode
        audio_z, bs_mu, bs_logvar = self.encode(audio, blendshape)
        bs_z = self.reparameterize(bs_mu, bs_logvar)

        # decode
        z = self.fuse(audio_z, bs_z)
        output = self.decode(z)

        return audio_z, bs_z, output, bs_mu, bs_logvar

class LSTMAE2dist(nn.Module):
    """Audio-only variational autoencoder over blendshapes.

    A 2-layer bidirectional LSTM encodes the audio feature sequence; two
    linear heads produce the mean and log-variance of a Gaussian latent, which
    is sampled with the reparameterization trick and decoded to a blendshape
    vector.  Unlike ``LSTMAEdist`` there is no blendshape encoder branch — the
    latent comes from audio alone.

    Args:
        num_features: per-frame audio feature dimension (LSTM input size).
        num_audiof: latent dimension.
        num_blendshapes: number of blendshape coefficients out.
        is_concat: must be True; the bilinear variant was removed from this
            audio-only model.
    """

    def __init__(self, num_features=39, num_audiof=128, num_blendshapes=51, is_concat=True):
        super(LSTMAE2dist, self).__init__()
        self.is_concat = is_concat
        self.num_audiof = num_audiof

        ## encoder
        # Audio branch: 2-layer bidirectional LSTM; 256 units per direction
        # gives a 512-dim output, projected to the latent mean / log-variance.
        self.rnn = nn.LSTM(input_size=num_features, hidden_size=256, num_layers=2,
                        batch_first=True, dropout=0.5, bidirectional=True)
        self.audio_fc11 = nn.Linear(512, num_audiof)  # latent mean head
        self.audio_fc12 = nn.Linear(512, num_audiof)  # latent log-variance head

        # NOTE(review): the three modules below were experimental alternative
        # encoders (ResNet / conv paths, previously toggled via commented-out
        # code in ``encode``).  They are constructed but unused by the current
        # forward pass; kept so existing checkpoints' state dicts still match.
        self.resnet = ResNet(BasicBlock, [2, 2, 2, 2], 1, 51)

        # Conv feature extractor: single-channel 3x3 conv (stride 1, padding 1
        # preserves spatial size), batch norm, ReLU, then a 4x4/stride-2
        # max-pool for downsampling.
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=4, stride=2)
        )
        # Classifier head for the conv path (also unused, see note above).
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(1 * 64 * 256, num_audiof),
            nn.BatchNorm1d(num_audiof),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
        )

        ## decoder
        if self.is_concat:
            self.decoder_fc = nn.Sequential(
                nn.Linear(num_audiof, 64),
                nn.ReLU(True),
                nn.Linear(64, num_blendshapes),
                nn.Sigmoid()
            )
        else:
            # Bilinear fusion is not supported in this audio-only variant;
            # no decoder is built in this case (decode would then fail).
            print('NO bilinear combination in 2dist model')

    def encode(self, audio):
        """Encode an audio sequence into (mu, logvar) of the latent Gaussian."""
        audio_rnn, _ = self.rnn(audio)
        # Last time step of the bidirectional LSTM output as clip embedding.
        audio_h = audio_rnn[:, -1, :]

        return self.audio_fc11(audio_h), self.audio_fc12(audio_h)

    def reparameterize(self, mu, logvar):
        """Draw z ~ N(mu, sigma^2) with the reparameterization trick.

        NOTE(review): unlike ``LSTMAEdist`` this version samples even in eval
        mode (no ``self.training`` guard); kept as-is to preserve behavior,
        but confirm whether deterministic eval (returning ``mu``) was intended.
        """
        std = torch.exp(0.5 * logvar)
        # randn_like inherits std's dtype and device, so this works on CPU and
        # GPU alike.  The previous Variable(torch.randn(...)).cuda() used the
        # deprecated Variable API and crashed on CUDA-less hosts.
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def decode(self, z):
        """Map a latent sample back to blendshape space (values in [0, 1])."""
        return self.decoder_fc(z)

    def forward(self, audio):
        """Encode audio, sample the latent, and decode to blendshapes.

        Returns:
            The reconstructed blendshape tensor only.
        """
        # encode
        mu, logvar = self.encode(audio)
        z = self.reparameterize(mu, logvar)

        # decode
        output = self.decode(z)

        return output


class Audio2BSModel(nn.Module):
    # NOTE(review): incomplete stub.  ``__init__`` defines no submodules, and
    # ``forward`` calls self.encode / self.reparameterize / self.decode, none
    # of which exist on this class or on nn.Module — calling forward raises
    # AttributeError.  The body mirrors LSTMAE2dist.forward, so presumably the
    # missing methods were meant to come from there; confirm before use.

    def __init__(self):
        super(Audio2BSModel, self).__init__()




    def forward(self, audio):
        # encode
        # NOTE(review): the three self.* methods below are undefined here.
        audio_mu, audio_logvar = self.encode(audio)
        mu = audio_mu#torch.cat((audio_mu, bs_mu), dim=1)
        logvar = audio_logvar#torch.cat((audio_logvar, bs_logvar), dim=1)
      
        z = self.reparameterize(mu, logvar)

        # decode
        # z = self.fuse(audio_z, bs_z)
        output = self.decode(z)
        # output = audio_h

        # return z[:, :self.num_audiof], z[:, self.num_audiof:], output, mu, logvar
        return output
