
import torch
from torch import nn
from torch.nn import functional as F
import torch.distributions as dist
from typing import Literal
import math, numpy as np
from pathlib import Path
from .flamelbs import FlameLBS
from .deformation import DeformNetwork
from typing import NamedTuple, List, Dict
from .utils import build_quaternion, quaternion_multiply, comput_v_rotate_quat
        
# %% Quaternion activation: angle-axis to unit quaternion
def angle_axis_to_quat(data: torch.Tensor):
  """Convert angle-axis tensors (theta, x, y, z) to quaternions (w, xyz).

  The last dimension holds the rotation angle followed by a (possibly
  unnormalized) axis; the axis is normalized before the quaternion is built.
  """
  angle = data[..., 0]
  axis = F.normalize(data[..., 1:], dim=-1)
  half = angle / 2
  real = torch.cos(half)[..., None]
  imag = torch.sin(half)[..., None] * axis
  return torch.cat([real, imag], dim=-1)
        
# %% Flame Gaussian Splatting
class FlameGS(nn.Module):
  """3D Gaussian Splatting kernels bound to the vertices of a FLAME mesh.

  One Gaussian kernel is bound to each FLAME vertex (``binding``). Raw
  parameters (``_xyz``, ``_scaling``, ...) are stored pre-activation; the
  matching properties apply the activation functions set up in ``__init__``.
  """
  # fixed properties
  _camera_scale = 1. # global scale factor; must be kept consistent with the scaling values
  _origin_pts_gap = 4e-4 # minimum spacing between points in the original vhap space
  
  def __init__(self, 
               sh_degree:int=3,
               output_shs: bool = False,
               device = torch.device('cuda:0'),
               flamelbs:FlameLBS = None,
               ):
    """Create one zero-initialised Gaussian per FLAME vertex.

    Args:
      sh_degree: maximum spherical-harmonics degree of the color features.
      output_shs: if True, the get_gs_params* methods return full SH
        coefficients instead of the activated DC color.
      device: target device (only stored here, not applied to parameters).
      flamelbs: FLAME linear-blend-skinning model; a default instance is
        created when None.
    """
    super().__init__()
    if flamelbs is None:
      flamelbs = FlameLBS() # base lbs model
    self.flamelbs = flamelbs
    
    self.max_sh_degree = sh_degree  
    self.output_shs = output_shs
    # binding[i] is the index of the FLAME vertex that kernel i follows
    self.binding_num = self.flamelbs.vertices_num
    self.binding = nn.Parameter(torch.arange(self.binding_num), requires_grad=False)
    
    # initial per-kernel rotation computed from the static vertex normals
    self._rotation_init = nn.Parameter(comput_v_rotate_quat(self.flamelbs.n_static), requires_grad=False) # N, 4
    
    self.shs_coef_num = shs_coef_num = sum([2 * i + 1 for i in range(self.max_sh_degree+1)])
    self._xyz = nn.Parameter(torch.zeros(self.binding_num, 3), requires_grad=True) # offset from the bound vertex, N, 3
    self._features_dc = nn.Parameter(torch.zeros(self.binding_num, 1, 3), requires_grad=True) # N, 1, 3
    self._features_rest = nn.Parameter(torch.zeros(self.binding_num, shs_coef_num-1, 3), requires_grad=True) # N, Σ_{sh_degree}(2i+1), 3
    self._scaling = nn.Parameter(torch.zeros(self.binding_num, 2), requires_grad=True) # N, 2
    # NOTE(review): nn.Parameter(self._rotation_init) shares storage with the
    # frozen init tensor above — confirm that in-place updates to _rotation
    # are meant to also change _rotation_init (use .clone() otherwise).
    self._rotation = nn.Parameter(self._rotation_init, requires_grad=True) # N, 4
    self._opacity = nn.Parameter(torch.zeros(self.binding_num, 1), requires_grad=True) # N, 1
    
    self.percent_dense = 0.
    self.spatial_lr_scale = 1.
    self.scene_extent = 0.
    
    # stiff softplus (large beta): keeps scales positive, with a value of
    # roughly init_bias for the zero-initialised _scaling
    init_bias = self._origin_pts_gap * self._camera_scale
    beta = 1e4 / self._camera_scale
    self.scaling_activation = lambda x: F.softplus(x + init_bias, beta=beta)
    self.inverse_scaling_activation = lambda y: y - init_bias # crude inverse (exact only in the linear part of the softplus)
    
    self.opacity_activation = torch.sigmoid
    self.inverse_opacity_activation = lambda x:torch.log(x/(1-x+1e-8)) # inverse sigmoid
    
    # self.rotation_activation = lambda x: angle_axis_to_quat(x)
    self.rotation_activation = lambda x: F.normalize(x, dim=-1)
    
    self.color_activation = torch.sigmoid   
    self.inverse_color_activation = lambda x:(x[:, None, :] - 0.5) / 0.28209479177387814 # rgb color -> SH DC coefficient (shs_0)
    
    self.device = device
    
  @property
  def active_pts_num(self):
    # number of currently active Gaussian kernels
    return self._xyz.size(0)
    
  @property
  def raw_xyz(self):
    # kernel centers in the original (unscaled) FLAME space
    _xyz = self._xyz
    return self._xyz / self._camera_scale + self.flamelbs.v_static[self.binding]
    
  @property
  def xyz(self):
    # kernel centers on the static mesh, in camera-scaled space
    # (_xyz is stored in camera-scaled units, see camera_scale_up)
    _xyz = self._xyz
    return self._xyz + self.flamelbs.v_static[self.binding] * self._camera_scale
    
  @property
  def color(self):
    # activated RGB color derived from the SH DC coefficient
    color = self._features_dc[:, 0, :]
    return self.color_activation(color) 

  @property
  def shs(self):
    # raw SH coefficients, DC band first
    features_dc = self._features_dc
    features_rest = self._features_rest
    return torch.cat((features_dc, features_rest), dim=1) # shape: [pts_num, shs_coef_num, 3]
  
  @property
  def scaling(self):
    # activated (strictly positive) per-kernel scales
    return self.scaling_activation(self._scaling)
  
  @property
  def opacity(self):
    # activated opacity in (0, 1)
    return self.opacity_activation(self._opacity)
  
  @property
  def rotation(self):
    # per-kernel unit quaternions
    return self.rotation_activation(self._rotation)
  
  @property
  def binding_counter(self):
    # number of kernels bound to each vertex index
    return torch.bincount(self.binding, 
                          minlength=self.binding_num)

  def camera_scale_up(self, scale_ratio:float):
    """Multiply the camera scale and rescale the stored offsets to match."""
    self._camera_scale *= scale_ratio
    with torch.no_grad():
      self._xyz.data = self._xyz.data * scale_ratio      
    
  def camera_set_scale(self, scale:float):
    """Set the camera scale directly (stored offsets are NOT rescaled here)."""
    self._camera_scale = scale 
    
  def get_xyz(self, vtx: torch.Tensor, translation: torch.Tensor = None):
    """Kernel centers for a batch of posed meshes.

    Args:
      vtx: posed FLAME vertices — assumed (B, V, 3); TODO confirm.
      translation: optional per-sample translation, (B, 3), in mesh space.
    Returns:
      Kernel centers (B, N, 3) in camera-scaled space.
    """
    xyz = self._xyz
    xyz = xyz[None, ...] + vtx[:, self.binding] * self._camera_scale
    if translation is not None:
      xyz = xyz + translation[:, None, :] * self._camera_scale # translate B, 3   
    return xyz
    
  def get_gs_params(self, 
                    vtx: torch.Tensor, 
                    transmat: torch.Tensor,
                    translation: torch.Tensor = None,
                    ):
    """Assemble (xyz, scaling, rotation, opacity, feature) for a batch of
    posed meshes, rotating each kernel by its bound vertex's transform."""
    B = vtx.size(0)
    xyz = self.get_xyz(vtx, translation)
    v_quat = self.transmat2quat(transmat) # bound-vertex rotations as quaternions, B*N, 4
    scaling = self.scaling[None].repeat(B, 1, 1)
    rotation = quaternion_multiply(self.rotation[None].repeat(B, 1, 1).view(-1, 4), v_quat).view(B, -1, 4)
    opacity = self.opacity[None].repeat(B, 1, 1)
    feature = self.shs[None].repeat(B, 1, 1, 1) if self.output_shs else self.color[None].repeat(B, 1, 1) 
    return xyz, scaling, rotation, opacity, feature
  
  def get_gs_params_static(self, 
                    vtx: torch.Tensor, 
                    transmat: torch.Tensor,
                    deformer: DeformNetwork = None,
                    translation: torch.Tensor = None,
                    ):
    """Like get_gs_params, but rotation/opacity/features come from the
    deformer's static branch; the deformer outputs are also cached back into
    the raw parameters via replace_kernel."""
    B = vtx.size(0)
    xyz = self.get_xyz(vtx, translation)
    # xyz = self.xyz[None].repeat(B, 1, 1) + translation[:, None, :] * self._camera_scale # purely static variant
    _, _scaling, _quat, _opacity, _feature = deformer.forward_static(self.xyz[None]) # purely static synthesis
    _quat_new = self._rotation_init[None, self.binding] + _quat # residual on the init rotation
    quat = self.rotation_activation(_quat_new).repeat(B, 1, 1)
    v_quat = self.transmat2quat(transmat)
    rotation = quaternion_multiply(quat.view(-1, 4), v_quat).view(B, -1, 4)
    # scaling = self.scaling_activation(_scaling).repeat(B, 1, 1)
    scaling = self.scaling[None].repeat(B, 1, 1) # scaling keeps the learned static value, not the deformer output
    opacity = self.opacity_activation(_opacity).repeat(B, 1, 1)
    _feature = _feature.repeat(B, 1, 1, 1)
    feature = _feature if self.output_shs else self.color_activation(_feature[:, :, 0, :])  
    # cache the deformer outputs (batch index 0) as the new raw parameters
    self.replace_kernel(None, _quat_new[0], _opacity[0], _feature[0])  
    return xyz, scaling, rotation, opacity, feature
  
  def replace_kernel(self, scaling, rotation, opacity, feature):
    """Overwrite the raw (pre-activation) kernel parameters in place.

    Arguments left as None keep their current value; ``feature`` is split
    into the DC band (first SH coefficient) and the remaining coefficients.
    """
    with torch.no_grad():
      if scaling is not None: self._scaling.data = scaling 
      if rotation is not None: self._rotation.data = rotation
      if opacity is not None: self._opacity.data = opacity
      if feature is not None: 
        self._features_dc.data = feature[:, :1, :]
        self._features_rest.data = feature[:, 1:, :]    
  
  def get_gs_params_deform(self, 
                        vtx: torch.Tensor, 
                        transmat: torch.Tensor,
                        deformer: DeformNetwork,
                        extra_feats: Dict,
                        translation: torch.Tensor = None,
                        ):
    """Splatting inputs with residual deformation predicted by ``deformer``.

    The deformer predicts residuals (d_xyz, d_scaling, ...) that are added to
    the raw parameters before activation.
    """
    B = vtx.size(0)
    xyz = vtx[:, self.binding] * self._camera_scale
    xyz = xyz + self._xyz[None, :] # B, N, 3
    v_quat = self.transmat2quat(transmat)
    d_xyz, d_scaling, d_quat, d_opacity, d_feature = deformer.forward_dynamic(xyz, **extra_feats) # dynamic generation
    quat = self.rotation_activation(self._rotation[None] + d_quat)
    rotation = quaternion_multiply(quat.view(-1, 4), v_quat).view(B, -1, 4)
    # NOTE(review): if d_scaling/d_opacity already carry a batch dimension of
    # size B, repeat(B, 1, 1) below grows it to B*B; feature is NOT repeated.
    # Verify the output shapes of deformer.forward_dynamic.
    scaling = self.scaling_activation(self._scaling[None] + d_scaling).repeat(B, 1, 1)
    opacity = self.opacity_activation(self._opacity[None] + d_opacity).repeat(B, 1, 1)
    feature = self.shs[None] + d_feature
    feature = feature if self.output_shs else self.color_activation(feature[:, :, 0, :])  
    if translation is not None:
      xyz += translation[:, None, :] * self._camera_scale # translate B, 3
    final_xyz = xyz + d_xyz
    return final_xyz, scaling, rotation, opacity, feature
    
  def transmat2quat(self, transmat: torch.Tensor):
    """Quaternions of the bound vertices' 3x3 rotation blocks, flattened."""
    return build_quaternion(transmat[:, self.binding, :3, :3].view(-1, 3, 3)) # B*N, 4
    
  def forward(self,
              shape: torch.Tensor,
              expr: torch.Tensor,
              rotation: torch.Tensor,
              neck: torch.Tensor,
              jaw: torch.Tensor,
              eyes: torch.Tensor,
              translation: torch.Tensor,
              static_offset: torch.Tensor = None, # personal shape offset
              deformer: DeformNetwork = None,
              ave_feature: torch.Tensor = None,
              ):
    """Pose the FLAME mesh from the given coefficients (LBS runs without
    grad) and return Gaussian parameters, optionally deformed by ``deformer``.

    Returns:
      The (xyz, scaling, rotation, opacity, feature) tuple of the chosen
      get_gs_params* path.
    """
    B = shape.size(0)
    with torch.no_grad():
      vertices, v_transmat, joints =  self.flamelbs.forward(shape, expr, rotation, neck, jaw, eyes, None, static_offset)
    if deformer is None:
      return self.get_gs_params(vertices, v_transmat, translation=translation)
    else:
      # NOTE(review): get_gs_params_deform annotates extra_feats as a Dict
      # and unpacks it with ** — unpacking a list that way raises TypeError.
      # Confirm the keyword names expected by deformer.forward_dynamic and
      # pass a dict here.
      extra_feats = [shape, expr, rotation, neck, jaw, eyes, ave_feature]
      return self.get_gs_params_deform(vertices, v_transmat, deformer, extra_feats, translation=translation)
  
  def save_ply(self, path):
    """Export the static-space Gaussians to a 3DGS-style PLY file.

    Attributes follow the standard Gaussian-Splatting layout: position,
    (zero) normals, f_dc_*, f_rest_*, opacity, scale_*, rot_*.
    """
    try:
      from plyfile import PlyData, PlyElement
    except ImportError:
      raise ImportError('Please install the plyfile package using pip install plyfile')
    path = Path(path).expanduser().resolve()
    path.parent.mkdir(parents=True, exist_ok=True)

    xyz = self.xyz.detach().cpu().numpy()
    normals = np.zeros_like(xyz)
    # [N, C, 3] -> [N, 3*C], channels grouped per color as 3DGS expects
    feat2data = lambda x:x.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
    if self.output_shs:
      f_dc = feat2data(self._features_dc)
      f_rest = feat2data(self._features_rest)
    else:
      f_dc = feat2data(self.inverse_color_activation(self.color))
      f_rest = feat2data(self._features_rest)
    opacities = self._opacity.detach().cpu().numpy()
    scale = self.inverse_scaling_activation(self.scaling).detach().cpu().numpy() # the raw scaling is normalised for training, so store the (approximate) static-space value instead
    rotation = self._rotation.detach().cpu().numpy()
    
    l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
    # All channels except the 3 DC
    for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
        l.append('f_dc_{}'.format(i))
    for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
        l.append('f_rest_{}'.format(i))
    l.append('opacity')
    for i in range(self._scaling.shape[1]):
        l.append('scale_{}'.format(i))
    for i in range(self._rotation.shape[1]):
        l.append('rot_{}'.format(i))
    dtype_full = [(attribute, 'f4') for attribute in l]

    elements = np.empty(xyz.shape[0], dtype=dtype_full)
    attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
    elements[:] = list(map(tuple, attributes))
    el = PlyElement.describe(elements, 'vertex')
    PlyData([el]).write(str(path))
    
    
# %% Flame Gaussian Splatting with Face Binding

class FlameGsFB(FlameGS):
  """FlameGS variant binding one Gaussian per FLAME face instead of per vertex.

  ``_xyz`` is reinterpreted as barycentric logits: a softmax over its last
  dimension gives the weights of the bound face's three vertices that locate
  the kernel on the face.

  NOTE(review): the inherited get_gs_params_deform still indexes vtx with
  the (now per-face) binding and adds _xyz as a spatial offset, ignoring the
  barycentric reinterpretation — confirm it is never called on this subclass.
  """
  def __init__(self, 
               sh_degree:int=3,
               output_shs: bool = False,
               device = torch.device('cuda:0'),
               flamelbs:FlameLBS = None,
               ):
    """Same arguments as FlameGS; re-registers all kernel parameters per face."""
    super().__init__(sh_degree, output_shs, device, flamelbs)
    self.binding_face = True
    # the per-vertex parameters registered by the parent __init__ are
    # discarded and replaced with per-face ones below
    self.binding_num = self.flamelbs.faces_num
    self.binding = nn.Parameter(torch.arange(self.binding_num), requires_grad=False)
    
    # initial rotation derived from the static face normals
    self._rotation_init = nn.Parameter(comput_v_rotate_quat(self.flamelbs.face_normals), requires_grad=False) # N, 4
    
    self.shs_coef_num = shs_coef_num = sum([2 * i + 1 for i in range(self.max_sh_degree+1)])
    self._xyz = nn.Parameter(torch.zeros(self.binding_num, 3), requires_grad=True) # barycentric logits, N, 3
    self._features_dc = nn.Parameter(torch.zeros(self.binding_num, 1, 3), requires_grad=True) # N, 1, 3
    self._features_rest = nn.Parameter(torch.zeros(self.binding_num, shs_coef_num-1, 3), requires_grad=True) # N, Σ_{sh_degree}(2i+1), 3
    self._scaling = nn.Parameter(torch.zeros(self.binding_num, 2), requires_grad=True) # N, 2
    self._rotation = nn.Parameter(self._rotation_init, requires_grad=True) # N, 4
    self._opacity = nn.Parameter(torch.zeros(self.binding_num, 1), requires_grad=True) # N, 1
    
  @property
  def raw_xyz(self):
    # barycentric interpolation of each bound face's static vertices
    xyz = F.softmax(self._xyz, dim=-1) # N * 3 barycentric weights
    face_vertices = self.flamelbs.v_static[self.flamelbs.faces][self.binding] # N * 3 * 3
    return torch.einsum('nki,nk->ni', face_vertices, xyz) # N * 3 * 3 * 3 -> N * 3
    
  @property
  def xyz(self):
    # same interpolation as raw_xyz, but in camera-scaled space
    xyz = F.softmax(self._xyz, dim=-1) # N * 3
    face_vertices = self.flamelbs.v_static[self.flamelbs.faces][self.binding] # N * 3 * 3
    return torch.einsum('nki,nk->ni', face_vertices, xyz) * self._camera_scale
  
  def get_xyz(self, vtx: torch.Tensor, translation: torch.Tensor = None):
    """Kernel centers as barycentric combinations of the posed face vertices."""
    xyz = F.softmax(self._xyz, dim=-1) # N * 3
    xyz = xyz[None, ...]
    face_vertices = vtx[:, self.flamelbs.faces][:, self.binding] # B * N * 3 * 3
    xyz = torch.einsum('bnki,bnk->bni', face_vertices, xyz) * self._camera_scale
    if translation is not None:
      xyz = xyz + translation[:, None, :] * self._camera_scale # translate B, 3   
    return xyz
  
  def transmat2quat(self, transmat: torch.Tensor):
    """Per-face quaternion from the barycentric blend of the face's vertex
    rotation blocks.

    NOTE(review): a barycentric blend of rotation matrices is generally not a
    pure rotation; presumably build_quaternion tolerates this — confirm.
    """
    xyz = F.softmax(self._xyz, dim=-1) # N * 3
    xyz = xyz[None, ...]
    transmat_binding = transmat[:, self.flamelbs.faces, :3, :3][:, self.binding]
    transmat_face = torch.einsum('bnkij,bnk->bnij', transmat_binding, xyz)
    return build_quaternion(transmat_face.reshape(-1, 3, 3)) # B*N, 4