import torch
import torch.nn as nn

from extractor import MFCCExtractor, LFCCExtractor,MelSpectrogramExtractor
from lstm import *


class Audio2BSNvidiaNet(nn.Module):
    """Audio-to-blendshape model: a waveform feature extractor feeding a NvidiaNet backbone.

    This is an abstract-style base: ``wav2feature`` is deliberately left as
    ``None`` and must be assigned by a subclass (e.g. ``MFCCAudio2BSNvidiaNet``)
    before ``forward`` is called, otherwise ``forward`` raises ``TypeError``.
    """

    def __init__(self, n_bs: int):
        """Args:
            n_bs: number of blendshape coefficients the backbone predicts.
        """
        super().__init__()
        self.n_bs = n_bs
        # Subclasses must replace this with a waveform -> feature module.
        self.wav2feature = None
        self.bone = NvidiaNet(n_bs)

    def forward(self, x):
        """Map a batch of waveforms to blendshape coefficients."""
        # Removed an unused `bs = x.size(0)` local present in the original.
        x = self.wav2feature(x)
        return self.bone(x)

class MFCCAudio2BSNvidiaNet(Audio2BSNvidiaNet):
    """Audio2BSNvidiaNet variant driven by MFCC features."""

    def __init__(self, n_bs: int):
        super().__init__(n_bs)
        self.wav2feature = MFCCExtractor(
            sample_rate=16000,
            n_mfcc=39,
            out_dim=32,
            # presumably one 30 fps video frame worth of samples — confirm
            win_length=int(16000.0 / 30),
            n_fft=1024,
            normalize=False,
        )

class Audio2BSLSTM(nn.Module):
    """Audio-to-blendshape model: a waveform feature extractor feeding an LSTMNvidiaNet.

    Abstract-style base: ``wav2feature`` is deliberately ``None`` here and must
    be assigned by a subclass before ``forward`` is called, otherwise
    ``forward`` raises ``TypeError``.
    """

    def __init__(self, n_bs: int):
        """Args:
            n_bs: number of blendshape coefficients the backbone predicts.
        """
        super().__init__()
        self.n_bs = n_bs
        # Subclasses must replace this with a waveform -> feature module.
        self.wav2feature = None
        self.bone = LSTMNvidiaNet(n_bs)

    def forward(self, x):
        """Map a batch of waveforms to blendshape coefficients."""
        # Removed an unused `bs = x.size(0)` local present in the original.
        x = self.wav2feature(x)
        return self.bone(x)

class MFCCAudio2BSLSTM(Audio2BSLSTM):
    """Audio2BSLSTM variant driven by MFCC features."""

    def __init__(self, n_bs: int):
        super().__init__(n_bs)
        self.wav2feature = MFCCExtractor(
            sample_rate=16000,
            n_mfcc=39,
            out_dim=32,
            # presumably one 30 fps video frame worth of samples — confirm
            win_length=int(16000.0 / 30),
            n_fft=1024,
            normalize=False,
        )
        
class LFCCAudio2BSLSTM(Audio2BSLSTM):
    """Audio2BSLSTM variant driven by LFCC features."""

    def __init__(self, n_bs: int):
        super().__init__(n_bs)
        # Feature-extractor configuration gathered in one place for readability.
        extractor_cfg = dict(
            sample_rate=16000,
            n_lfcc=39,
            out_dim=32,
            win_length=int(16000.0 / 30),
            n_fft=1024,
            normalize=False,
        )
        self.wav2feature = LFCCExtractor(**extractor_cfg)

class MelSpectrogramAudio2BSLSTM(Audio2BSLSTM):
    """Audio2BSLSTM variant driven by mel-spectrogram features."""

    def __init__(self, n_bs: int):
        super().__init__(n_bs)
        sample_rate = 16000
        self.wav2feature = MelSpectrogramExtractor(
            sample_rate=sample_rate,
            n_mels=39,
            out_dim=32,
            win_length=int(sample_rate / 30),
            n_fft=1024,
            normalize=False,
        )
    
class Audio2BS(nn.Module):
    """Audio-to-blendshape network after Karras et al. (2017).

    https://research.nvidia.com/sites/default/files/publications/karras2017siggraph-paper_0.pdf

    Pipeline: MFCC features -> formant "analysis" conv stack (reduces the
    feature axis) -> "articulation" conv stack (reduces the time axis) ->
    fully-connected output head producing ``n_bs`` blendshape coefficients.
    """

    def __init__(self, n_bs: int):
        """Args:
            n_bs: number of blendshape coefficients to predict.
        """
        super().__init__()
        self.n_bs = n_bs

        self.wav2feature = MFCCExtractor(
            sample_rate=16000,
            n_mfcc=32,
            out_dim=53,
            win_length=160 * 2,
            n_fft=1024,
            normalize=False,
        )
        # Convolves along the feature axis (kernel (1, 3), stride (1, 2)),
        # halving the feature dimension at each layer.
        self.analysis_net = nn.Sequential(
            nn.Conv2d(1, 72, kernel_size=(1, 3), stride=(1, 2), padding=(0, 1)),
            nn.BatchNorm2d(72),
            nn.ReLU(),
            nn.Conv2d(72, 108, kernel_size=(1, 3), stride=(1, 2), padding=(0, 1)),
            nn.BatchNorm2d(108),
            nn.ReLU(),
            nn.Conv2d(108, 162, kernel_size=(1, 3), stride=(1, 2), padding=(0, 1)),
            nn.BatchNorm2d(162),
            nn.ReLU(),
            nn.Conv2d(162, 243, kernel_size=(1, 3), stride=(1, 2), padding=(0, 1)),
            nn.BatchNorm2d(243),
            nn.ReLU(),
            nn.Conv2d(243, 256, kernel_size=(1, 3), stride=(1, 2), padding=(0, 1)),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )

        # Convolves along the time axis (kernel (3, 1), stride (2, 1)).
        # NOTE(review): from the fourth layer on the order flips to
        # BatchNorm -> Conv -> ReLU, unlike the Conv -> BatchNorm -> ReLU
        # pattern above. This looks accidental, but reordering would change
        # both numerics and checkpoint state-dict keys, so it is preserved
        # as-is — confirm against training checkpoints before "fixing".
        self.articulation_net = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0)),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0)),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0)),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 256, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0)),
            nn.ReLU(),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 256, kernel_size=(4, 1), stride=(4, 1)),
            nn.ReLU(),
        )

        self.output_net = nn.Sequential(
            nn.Linear(256, 72),
            nn.Linear(72, 128),
            nn.Tanh(),
            nn.Linear(128, 50),
            nn.Linear(50, self.n_bs),
        )

    def forward(self, x):
        """Map a batch of waveforms to blendshape coefficients of shape (batch, n_bs)."""
        # Removed an unused `bs = x.size(0)` local and a stale commented-out
        # instance-norm call present in the original.
        x = self.wav2feature(x)
        x = x.unsqueeze(1)  # add channel dim for Conv2d
        x = self.analysis_net(x)
        x = self.articulation_net(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        x = self.output_net(x)
        return x



class FaceFormerLoss:
    """Thin wrapper around VocaLoss for (1, frames, dims)-shaped batches.

    Squeezes away the leading batch dimension and drops a trailing odd frame
    so that VocaLoss's velocity term (which pairs consecutive frames) gets an
    even frame count. Returns only the combined scalar loss.
    """

    def __init__(self, k_rec, k_vel, n_size=3) -> None:
        """Args:
            k_rec: weight of the reconstruction term.
            k_vel: weight of the velocity term.
            n_size: per-vertex coordinate dimension passed through to VocaLoss.
        """
        self.k_rec = k_rec
        self.k_vel = k_vel
        self.loss = VocaLoss(k_rec, k_vel, n_size)

    def __call__(self, pred, gt):
        """Return the combined VocaLoss scalar for one sequence."""
        gt = gt.squeeze(0)
        pred = pred.squeeze(0)
        # VocaLoss pairs frames (t, t+1); drop the last frame if the count is odd.
        if gt.shape[0] % 2 != 0:
            gt = gt[:-1]
            pred = pred[:-1]

        return self.loss(pred, gt)["loss"]

    def __str__(self):
        return f"FaceFormerLoss-k_rec={self.k_rec}-k_vel={self.k_vel}"

class VocaLoss:
    """VOCA-style loss: weighted reconstruction + frame-to-frame velocity terms.

    Inputs to ``__call__`` are flat per-frame vertex tensors of shape
    (frames, n_verts * n_size); they are reshaped internally to
    (frames, n_verts, n_size) where ``n_size`` is the per-vertex coordinate
    dimension (3 for xyz).
    """

    def __init__(self, k_rec: float = 1.0, k_vel: float = 10.0, n_size: int = 3):
        """Args:
            k_rec: weight of the reconstruction term.
            k_vel: weight of the velocity term.
            n_size: coordinates per vertex.
        """
        self.k_rec = k_rec
        self.k_vel = k_vel
        self.n_size = n_size

    def reconstruction_loss(self, pred, gt):
        """Mean (over frames and vertices) of per-vertex squared L2 distance."""
        # `dim=` is the torch-idiomatic spelling of the original numpy-style `axis=`.
        return torch.mean(torch.sum((pred - gt) ** 2, dim=2))

    def velocity_loss(self, pred, gt):
        """Penalize mismatched motion between consecutive frame pairs.

        Requires an even number of frames, and must be called after
        ``__call__`` has set ``self.n_verts`` (raises AttributeError otherwise).
        """
        n_consecutive_frames = 2
        pred = pred.view(-1, n_consecutive_frames, self.n_verts, self.n_size)
        gt = gt.view(-1, n_consecutive_frames, self.n_verts, self.n_size)

        v_pred = pred[:, 1] - pred[:, 0]
        v_gt = gt[:, 1] - gt[:, 0]

        return torch.mean(torch.sum((v_pred - v_gt) ** 2, dim=2))

    def __call__(self, pred, gt):
        """Return {"loss", "rec_loss", "vel_loss"} for a (frames, n_verts*n_size) pair."""
        bs = pred.shape[0]
        gt = gt.view(bs, -1, self.n_size)
        pred = pred.view(bs, -1, self.n_size)
        # Cached for velocity_loss; inferred from the flattened input width.
        self.n_verts = pred.shape[1]

        rec_loss = self.reconstruction_loss(pred, gt)
        vel_loss = self.velocity_loss(pred, gt)

        return {
            "loss": rec_loss * self.k_rec + vel_loss * self.k_vel,
            "rec_loss": rec_loss,
            "vel_loss": vel_loss,
        }