from ezdlops import BaseTrainer, to_persistent
import torch
from torch import nn
from torch.nn import functional as F
from ave2flame.networks import AudioEncoder
from tqdm import tqdm

# %% functions
def quaternion_multiply(q1, q2):
  """Hamilton product q1 * q2 for quaternions stored as (w, x, y, z).

  Both inputs must have identical shape (..., 4); the product is taken
  along the last dimension and the result has the same shape.
  """
  assert q1.size(-1) == 4
  assert q1.size() == q2.size()
  aw, ax, ay, az = q1.unbind(-1)
  bw, bx, by, bz = q2.unbind(-1)
  return torch.stack(
      [
          aw * bw - ax * bx - ay * by - az * bz,  # scalar part
          aw * bx + ax * bw + ay * bz - az * by,
          aw * by - ax * bz + ay * bw + az * bx,
          aw * bz + ax * by - ay * bx + az * bw,
      ],
      dim=-1,
  )

def quaternion_conjugate(q):
  """Return the conjugate (w, -x, -y, -z) of each quaternion in q (..., 4)."""
  assert q.size(-1) == 4
  # Flip the sign of the vector part via a broadcast mask.
  sign = torch.tensor([1, -1, -1, -1], dtype=q.dtype, device=q.device)
  return q * sign

def axis_angle_to_quaternion(axis_angle):
  """Convert axis-angle rotation vectors to unit quaternions (w, x, y, z).

  Args:
    axis_angle: (..., 3) tensor whose direction is the rotation axis and
      whose norm is the rotation angle in radians.

  Returns:
    (..., 4) tensor of unit quaternions. A (near-)zero vector maps to the
    identity quaternion (1, 0, 0, 0) instead of producing NaNs — the
    original `axis_angle / angle` divided by zero there.
  """
  assert axis_angle.size(-1) == 3
  angle = axis_angle.norm(dim=-1, keepdim=True)
  half = angle / 2
  w = torch.cos(half)
  # xyz = sin(a/2) * (v / a); as a -> 0 the factor sin(a/2)/a -> 1/2,
  # so substitute that limit to keep the expression finite.
  sinc_half = torch.where(
      angle > 1e-8,
      torch.sin(half) / angle.clamp_min(1e-8),
      torch.full_like(angle, 0.5),
  )
  return torch.cat([w, sinc_half * axis_angle], dim=-1)

def quaternion_to_axis_angle(q):
  """Convert quaternions (w, x, y, z) to axis-angle rotation vectors.

  Args:
    q: (..., 4) tensor; need not be pre-normalized (it is normalized here).

  Returns:
    (..., 3) tensor whose direction is the rotation axis and whose norm is
    the rotation angle in [0, 2*pi]. The identity quaternion maps to the
    zero vector instead of producing NaNs — the original divided by
    sin(angle/2) == 0 there.
  """
  assert q.size(-1) == 4
  q_norm = q / q.norm(dim=-1, keepdim=True)  # normalize
  w, xyz = q_norm.split([1, 3], dim=-1)
  # Clamp into acos's domain: normalization can leave w epsilon outside
  # [-1, 1], which would yield NaN.
  angle = 2 * torch.acos(w.clamp(-1.0, 1.0))  # range [0, 2pi]
  # result = angle * xyz / sin(angle/2); as angle -> 0 the factor
  # angle/sin(angle/2) -> 2, so substitute that limit near the identity.
  sin_half = torch.sin(angle / 2)
  scale = torch.where(
      sin_half > 1e-8,
      angle / sin_half.clamp_min(1e-8),
      torch.full_like(angle, 2.0),
  )
  return scale * xyz
  

# %%
class FlameDecoderQuat(nn.Module):
  """Decode fused audio + shape features into FLAME expression and joint
  rotation (quaternion) distributions, each parameterized by (mu, var).
  """

  @staticmethod
  def angles_to_unit_vector(phi, theta):
    """Convert azimuth (phi) and elevation (theta) angles to a unit vector."""
    cos_theta = torch.cos(theta)
    return torch.stack(
        [cos_theta * torch.cos(phi), cos_theta * torch.sin(phi), torch.sin(theta)],
        dim=-1,
    )

  def __init__(self, n_joints=1, expr_shape=100):
    super().__init__()
    self._n_joints = n_joints
    self._expr_shape = expr_shape

    # Compress the 300-dim shape code into a compact 64-dim embedding.
    self.shape_encode = nn.Sequential(
        nn.Linear(300, 128),
        nn.LeakyReLU(0.02, True),
        nn.Linear(128, 64),
        nn.Softplus()
    )

    input_dim = 64 + 512  # shape embedding + audio feature

    # Each head emits concatenated [mu | sigma] halves.
    self.expr_mu_var = nn.Sequential(
        nn.Linear(input_dim, 256),
        nn.LeakyReLU(0.02, True),
        nn.Linear(256, 2 * self._expr_shape)  # 2 (mu, sigma)
    )

    self.quat_mu_var = nn.Sequential(
        nn.Linear(input_dim, 256),
        nn.LeakyReLU(0.02, True),
        nn.Linear(256, 2 * 4 * self._n_joints),  # joints * [w, x, y, z] * [mu, sigma]
    )

  def forward(self, aud_feat, shape_feat):
    """Predict per-frame distributions from audio and shape features.

    Returns:
      expr_mu (tanh-squashed), expr_var (sigmoid), quat_mu_norm (per-joint
      unit quaternions, flattened to (B, 4*n_joints)), quat_var (sigmoid).
    """
    batch = aud_feat.size(0)
    assert shape_feat.size(0) == batch
    fused = torch.cat([aud_feat, self.shape_encode(shape_feat)], dim=-1)

    expr_mu, expr_var = self.expr_mu_var(fused).chunk(2, dim=-1)
    quat_mu, quat_var = self.quat_mu_var(fused).chunk(2, dim=-1)

    expr_mu = torch.tanh(expr_mu)
    expr_var = torch.sigmoid(expr_var)
    quat_var = torch.sigmoid(quat_var)
    # Squash, then renormalize each joint's 4-vector to a unit quaternion.
    quat_units = F.normalize(torch.tanh(quat_mu).view(batch, self._n_joints, 4), p=2, dim=-1)
    return expr_mu, expr_var, quat_units.view(batch, -1), quat_var

  def to_flame_feat(self, expr_mu, expr_var, quat_mu, quat_var):
    """Map predicted distributions to FLAME parameters: the expression mean
    plus axis-angle rotations recovered from the quaternion means."""
    quats = quat_mu.view(-1, self._n_joints, 4)  # (B, n joints, 4)
    return expr_mu, quaternion_to_axis_angle(quats)  # (B, n joints, 3)
    

@to_persistent
class AveFace2Flame_Quat(BaseTrainer):
  """Trainer mapping audio (via a frozen AVE encoder) to FLAME parameters.

  Two FlameDecoderQuat heads are trained jointly: `decoder` predicts the
  current frame's distribution, `decoder_diff` predicts the frame-to-frame
  delta used to reconstruct the previous frame (temporal consistency).
  """
  _n_joints = 1
  _expr_shape = 100

  def __init__(self,
               ave_path: str = None,
               device: str = 'cpu',
               ):
    """
    Args:
      ave_path: path to the pretrained audio-encoder checkpoint.
      device: torch device string the modules are placed on.
    """
    super().__init__()
    # Frozen, pretrained audio encoder — used only for feature extraction.
    self.ave = AudioEncoder().to(device).eval()
    # map_location keeps CPU-only runs working with GPU-saved checkpoints
    # (the original call failed without CUDA available).
    ckpt = torch.load(ave_path, map_location=device)
    self.ave.load_state_dict({f'audio_encoder.{k}': v for k, v in ckpt.items()})

    self.decoder = FlameDecoderQuat(self._n_joints, self._expr_shape).to(device)
    self.decoder_diff = FlameDecoderQuat(self._n_joints, self._expr_shape).to(device)
    self.device = device
    self.max_epoch = 300

    self.to(device)
    self.set_dir()

  def loss(self, y, mu, var):
    """
    Gaussian negative log-likelihood (up to additive constants), averaged
    over all elements. `var` is offset by eps to avoid log(0) / div-by-zero.
    """
    var = var + torch.finfo(var.dtype).eps
    nll = torch.log(var) + (y - mu) ** 2 / var
    return torch.mean(nll)  # mean loss over the batch

  def forward(self, aud, shape_feat, target_cur, target_prev):
    """Compute NLL losses for the current and reconstructed previous frame.

    Args:
      aud: raw audio input for the frozen encoder.
      shape_feat: (B, 300) FLAME shape code.
      target_cur / target_prev: (B, expr_shape + 3*n_joints) ground truth:
        expression coefficients followed by axis-angle joint poses.

    Returns:
      (loss_nll_cur, loss_nll_pre) scalar tensors.
    """
    # Ground truth: split into expression and pose; pose -> quaternions.
    with torch.no_grad():
      expr = target_cur[:, :self._expr_shape]
      pose = target_cur[:, self._expr_shape:].view(-1, self._n_joints, 3)  # (B, joints, 3 axis)
      quat = axis_angle_to_quaternion(pose)  # (B, joints, 4)

      expr_pre = target_prev[:, :self._expr_shape]
      pose_pre = target_prev[:, self._expr_shape:].view(-1, self._n_joints, 3)
      quat_pre = axis_angle_to_quaternion(pose_pre)

      ygt = torch.cat([expr, quat.view(-1, self._n_joints * 4)], dim=-1)
      # Use n_joints * 4 here too — the original hard-coded 4, which was
      # only correct for a single joint.
      ygt_pre = torch.cat([expr_pre, quat_pre.view(-1, self._n_joints * 4)], dim=-1)

      # Audio features from the frozen encoder (no gradient).
      aud_feat = self.ave(aud)

    # Predict current frame and frame-to-frame difference.
    expr_mu, expr_var, quat_mu, quat_var = self.decoder(aud_feat, shape_feat)
    expr_mu_diff, expr_var_diff, quat_mu_diff, quat_var_diff = self.decoder_diff(aud_feat, shape_feat)

    mu_cur = torch.cat([expr_mu, quat_mu], dim=-1)
    var_cur = torch.cat([expr_var, quat_var], dim=-1)

    # Reconstruct the previous frame from the detached current prediction
    # plus the predicted delta, so only decoder_diff receives this gradient.
    expr_mu_pre = expr_mu_diff + expr_mu.detach()
    # q0 = conj(q_diff) * q1, applied per joint (the original multiplied the
    # flat (B, 4*n_joints) tensors, valid only for n_joints == 1).
    q_diff = quat_mu_diff.view(-1, self._n_joints, 4)
    q_cur = quat_mu.detach().view(-1, self._n_joints, 4)
    quat_mu_pre = quaternion_multiply(quaternion_conjugate(q_diff), q_cur).view(-1, self._n_joints * 4)
    mu_pre = torch.cat([expr_mu_pre, quat_mu_pre], dim=-1)

    # Variances of independent Gaussians add.
    quat_var_pre = quat_var.detach() + quat_var_diff
    expr_var_pre = expr_var.detach() + expr_var_diff
    var_pre = torch.cat([expr_var_pre, quat_var_pre], dim=-1)

    loss_nll_cur = self.loss(ygt, mu_cur, var_cur)
    loss_nll_pre = self.loss(ygt_pre, mu_pre, var_pre)
    return loss_nll_cur, loss_nll_pre

  def fit(self, dataloader, optimizer):
    """Train for `max_epoch` epochs, logging to TensorBoard every 100
    iterations and checkpointing every 20 epochs."""
    for _ in range(self.max_epoch):
      progress = tqdm(dataloader)  # iterate the loader directly
      loss_epoch = 0.
      num_batches = 0
      for idx, data in enumerate(progress):
        aud_feat = data[0].to(self.device)
        flame_params, flame_params_pre, shape_feat = data[1]
        shape_feat = shape_feat.to(self.device)
        flame_params = flame_params.to(self.device)
        flame_params_pre = flame_params_pre.to(self.device)

        loss_nll_cur, loss_nll_pre = self(aud_feat, shape_feat, flame_params, flame_params_pre)
        loss = 0.75 * loss_nll_cur + 0.25 * loss_nll_pre
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_epoch += loss.item()
        num_batches = idx + 1
        progress.set_description(
            f'epoch: {self._epoch}, loss: {loss.item():.3f}, loss_avg: {loss_epoch / num_batches:.3f}')

        if self._iteration % 100 == 0:
          self._tb_writer.add_scalar('loss/loss_nll_cur', loss_nll_cur.item(), self._iteration)
          self._tb_writer.add_scalar('loss/loss_nll_pre', loss_nll_pre.item(), self._iteration)
          self._tb_writer.add_scalar('loss/loss_all', loss.item(), self._iteration)

        self._iteration += 1

      self._epoch += 1
      # Average over the true batch count — the original divided by the last
      # index (off by one) and raised NameError on an empty loader.
      if num_batches:
        self._tb_writer.add_scalar('loss/loss_epoch', loss_epoch / num_batches, self._epoch)
      if self._epoch % 20 == 0:
        self.save_auto()