from typing import Optional, Tuple

import torch
from torch import nn
from torch.nn import functional as F
from tqdm import tqdm

from ezdlops import BaseTrainer, to_persistent
from ave2flame.networks import AudioEncoder

class BFMDecoder(nn.Module):
  """Decode audio + identity features into a BFM expression distribution.

  Maps a 512-dim audio feature (concatenated with a 64-dim encoding of
  the 100-dim identity/shape vector) to the mean and variance of the
  79 BFM expression coefficients.
  """

  _id_shape = 100   # identity (shape) coefficient count
  _expr_shape = 79  # expression coefficient count

  def __init__(self, last_activate=True):
    """
    Args:
      last_activate: if True, append a Softplus to the variance head so
        the predicted variance is strictly positive.
    """
    super().__init__()
    # Compress the identity vector to a 64-dim code.
    self.shape_encode = nn.Sequential(
        nn.Linear(self._id_shape, 128),
        nn.LeakyReLU(0.02, True),
        nn.Linear(128, 64),
        nn.Softplus()
    )

    # 64-dim shape code + 512-dim audio feature.
    input_dim = 64 + 512

    # Mean head for the expression coefficients.
    self.expr_mu = nn.Sequential(
        nn.Linear(input_dim, 256),
        nn.LeakyReLU(0.02, True),
        nn.Linear(256, self._expr_shape)
    )

    # Variance head; Softplus keeps it positive when last_activate is set.
    seq = [
        nn.Linear(input_dim, 256),
        nn.LeakyReLU(0.02, True),
        nn.Linear(256, self._expr_shape),
    ]
    if last_activate:
      seq.append(nn.Softplus())
    self.expr_var = nn.Sequential(*seq)

  def forward(self,
              aud_feat: torch.Tensor,
              shape_feat: Optional[torch.Tensor] = None
              ) -> Tuple[torch.Tensor, torch.Tensor]:
    """Predict the expression distribution.

    Args:
      aud_feat: (batch, 512) audio feature — assumed; TODO confirm against
        the audio encoder's output width.
      shape_feat: (batch, 100) identity coefficients; a zero identity is
        used when omitted.

    Returns:
      Tuple (mu, var), each of shape (batch, 79).
    """
    if shape_feat is None:
      # Neutral identity fallback on the same device as the audio feature.
      shape_feat = torch.zeros(aud_feat.size(0), self._id_shape, device=aud_feat.device)
    shape_enc = self.shape_encode(shape_feat)
    latent_feat = torch.cat([aud_feat, shape_enc], dim=-1)

    expr_var = self.expr_var(latent_feat)
    expr_mu = self.expr_mu(latent_feat)

    return expr_mu, expr_var
    

@to_persistent
class AveFace2BFM_Diff(BaseTrainer):
  """Audio-to-BFM-expression trainer with a frame-difference head.

  Uses a frozen pretrained audio encoder; ``decoder`` predicts the
  current frame's expression mean/variance and ``decoder_diff`` the
  frame-to-frame change. Training minimizes a Gaussian NLL on the
  current frame, the reconstructed previous frame, and the difference.
  """

  _expr_shape = 79  # number of BFM expression coefficients

  def __init__(self,
               ave_path: str = None,
               device: str = 'cpu',
               ):
    """
    Args:
      ave_path: optional checkpoint path for the pretrained audio encoder.
      device: torch device string all modules are moved to.
    """
    super().__init__()
    # Frozen audio encoder: eval mode here, run under no_grad in forward().
    self.ave = AudioEncoder().to(device).eval()
    if ave_path is not None:
      # map_location lets a checkpoint saved on another device load here.
      ckpt = torch.load(ave_path, map_location=device)
      # Checkpoint keys are stored without the 'audio_encoder.' prefix.
      self.ave.load_state_dict({f'audio_encoder.{k}': v for k, v in ckpt.items()})

    self.decoder = BFMDecoder().to(device)       # current-frame head
    self.decoder_diff = BFMDecoder().to(device)  # frame-difference head
    self.device = device
    self.max_epoch = 300

    self.to(device)
    self.set_dir()

  def loss(self, y, mu, var):
    """Gaussian negative log-likelihood (up to constant terms).

    ``var`` is shifted by the dtype epsilon to avoid log(0) / division
    by zero. Returns the batch-mean loss.
    """
    var = var + torch.finfo(var.dtype).eps
    loss = torch.log(var) + (y - mu)**2 / var
    return torch.mean(loss)  # batch-mean loss

  def forward(self, aud, shape_feat, target_cur, target_prev):
    """Compute the three NLL losses for one batch.

    Args:
      aud: raw audio input for the frozen audio encoder.
      shape_feat: (batch, 100) identity coefficients.
      target_cur / target_prev: parameter vectors whose first
        ``_expr_shape`` entries are the expression coefficients.

    Returns:
      (loss_nll_cur, loss_nll_pre, loss_nll_diff)
    """
    # Ground truth — no gradients needed.
    with torch.no_grad():
      expr = target_cur[:, :self._expr_shape]
      expr_pre = target_prev[:, :self._expr_shape]

      # concat of a single tensor is effectively a copy; kept as a
      # placeholder so additional parameter groups can be appended later.
      ygt = torch.concat([expr], dim=-1)
      ygt_pre = torch.concat([expr_pre], dim=-1)
      ygt_diff = ygt - ygt_pre
      # Audio features from the frozen encoder.
      aud_feat = self.ave(aud)

    # Current-frame distribution. Call the module (not .forward) so
    # hooks are honored.
    expr_mu, expr_var = self.decoder(aud_feat, shape_feat)
    mu = torch.concat([expr_mu], dim=-1)
    var = torch.concat([expr_var], dim=-1)
    # Frame-to-frame change distribution.
    expr_mu_diff, expr_var_diff = self.decoder_diff(aud_feat, shape_feat)
    mu_diff = torch.concat([expr_mu_diff], dim=-1)
    var_diff = torch.concat([expr_var_diff], dim=-1)
    # Reconstruct the previous frame; assuming the two predictions are
    # independent, their variances add.
    mu_pre = mu - mu_diff
    var_pre = var_diff + var

    # NLL against current frame, reconstructed previous frame, and delta.
    loss_nll_cur = self.loss(ygt, mu, var)
    loss_nll_pre = self.loss(ygt_pre, mu_pre, var_pre)
    loss_nll_diff = self.loss(ygt_diff, mu_diff, var_diff)
    return loss_nll_cur, loss_nll_pre, loss_nll_diff

  def fit(self, dataloader, optimizer, add_noise=True):
    """Training loop over ``self.max_epoch`` epochs.

    Args:
      dataloader: yields (aud_feat, (flame_params, flame_params_pre,
        shape_feat)) batches.
      optimizer: optimizer over this module's parameters.
      add_noise: if True, perturb shape_feat with zero-mean noise of
        gradually increasing scale (identity-robustness augmentation).
    """
    for _ in range(self.max_epoch):
      # tqdm over the dataloader directly also picks up its length.
      progress = tqdm(dataloader)
      loss_epoch = 0.
      num_batches = 0
      for idx, data in enumerate(progress):
        aud_feat = data[0]

        flame_params, flame_params_pre, shape_feat = data[1]
        aud_feat = aud_feat.to(self.device)
        shape_feat = shape_feat.to(self.device)
        # Add identity noise with gradually increasing scale.
        if add_noise:
          # NOTE(review): scale uses the *batch* index divided by
          # max_epoch — confirm this is intended rather than the
          # epoch counter.
          shape_feat_noise = shape_feat.abs() * torch.randn_like(shape_feat) * (idx / self.max_epoch)
          # Out-of-place add: .to() can return the same storage
          # (e.g. on CPU), so an in-place add_ would corrupt the
          # dataset's tensor across epochs.
          shape_feat = shape_feat + shape_feat_noise

        flame_params = flame_params.to(self.device)
        flame_params_pre = flame_params_pre.to(self.device)

        loss_nll_cur, loss_nll_pre, loss_nll_diff = self(aud_feat, shape_feat, flame_params, flame_params_pre)
        loss = 0.5 * loss_nll_cur + 0.25 * loss_nll_pre + 0.25 * loss_nll_diff
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_epoch += loss.item()
        num_batches += 1
        progress.set_description(f'epoch: {self._epoch}, loss: {loss.item():.3f}, loss_avg: {loss_epoch / num_batches:.3f}')

        if self._iteration % 100 == 0:
          self._tb_writer.add_scalar('loss/loss_nll_cur', loss_nll_cur.item(), self._iteration)
          self._tb_writer.add_scalar('loss/loss_nll_pre', loss_nll_pre.item(), self._iteration)
          self._tb_writer.add_scalar('loss/loss_nll_diff', loss_nll_diff.item(), self._iteration)

        self._iteration += 1

      self._epoch += 1
      if num_batches:
        # Fix: original divided by the last batch *index* (off-by-one,
        # and ZeroDivisionError for a single-batch loader).
        self._tb_writer.add_scalar('loss/loss_epoch', loss_epoch / num_batches, self._iteration)
      if self._epoch % 20 == 0:
        self.save_auto()