from ezdlops import BaseTrainer, to_persistent
from ezdlops.ezdlops.visualization import image as vt
from ezdlops.ezdlops.core.dataset import ZipDataset
from ..vhap.model.tracker import GlobalTracker
from .flamegs import FlameGS
from .deformation import DeformNetwork
from .utils import get_rasterizer, projection_from_intrinsics, get_from_allmap, compute_v_normals
from .utils import build_scaling_rotation
from typing import NamedTuple, Literal, List, Tuple, Dict, Union
import torch, tqdm, lpips
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import torchshow as ts
from torchvision.transforms.functional import gaussian_blur
try:
  import nvdiffrast.torch as dr
  USE_DR = True
except:
  USE_DR = False
          
# %%
# Frozen LPIPS perceptual metric (AlexNet backbone), shared at module level so
# every trainer reuses the same weights; moved onto the training device in `fit`.
lpips_model = lpips.LPIPS(net='alex').eval()
          
# %% coordinate-system transform matrices
class TransformMat(NamedTuple):
  """Bundle of camera transform matrices.

  viewmat: world-to-camera matrix, row-vector convention (``v @ viewmat``).
  projmat: camera-to-clip projection matrix.
  fullmat: precomputed composition ``viewmat @ projmat``.
  """
  viewmat: torch.Tensor
  projmat: torch.Tensor
  fullmat: torch.Tensor

# %% training object
# @to_persistent
class FlameGsTrainer(BaseTrainer):
  """Trainer that fits a FLAME-bound Gaussian head model against tracked frames."""
  # Class-level hyper-parameters shared by all instances.
  _use_dr = True            # use nvdiffrast for mesh-normal/alpha supervision (cleared on init failure)
  _add_noise = True         # perturb points after relocation (see relocate_gs)
  _dynamic_incr = 0.02      # fraction of remaining capacity to densify per step
  _init_lr = 1e-3           # base learning rate all group lrs are scaled from
  _densify_interval = 100   # run relocation/densification every N iterations
  _dead_opacity = 0.05      # opacity below which a point counts as dead
  _gt_blur_kernel = 3       # gaussian-blur kernel for gt images (0 disables)
  _gt_blur_sigma = 3        # gaussian-blur sigma for gt images
  
  def __init__(self, 
               tracker: GlobalTracker,
               gaussian: FlameGS,
               max_pts_num: int = 100000,
               random_sample: bool = True, # sample densify/relocation sources by probability (else: sort by score)
               static_deform: bool = True,
               prefix_remain: bool = True, # never touch the original binding points during relocation
               densify_method: Union[List[Literal["split", "score"]], None] = None,
               ):
    """Bind a FLAME tracker to a Gaussian head model for phase-wise training.

    Args:
      tracker: global FLAME tracker providing coefficients, camera and dataset.
      gaussian: FLAME-bound Gaussian model to optimise.
      max_pts_num: hard cap on the number of Gaussian points.
      random_sample: sample split/relocation candidates by probability instead
        of taking the top-scored points.
      static_deform: start optimisation with the static ("coarse") phase.
      prefix_remain: keep the first `binding_num` points from being relocated.
      densify_method: densification strategies; defaults to ["split", "score"].
        (Changed from a mutable default argument — behaviour is identical.)
    """
    super().__init__()
    self.active_sh_degree = 0  # current SH degree; grown via oneupSHdegree
    self.tracker = tracker
    self.gaussian = gaussian
    
    self.static_deform = static_deform
    self.deformer = DeformNetwork(
                      max_sh_degree=gaussian.max_sh_degree, 
                      bounds=gaussian._camera_scale * 0.1
                      )
    self.rasterizer_dr = None  # built lazily in update_dr_rasterizer
    self.rasterizer_gs = None  # built lazily in update_gs_rasterizer
    # keep only faces whose three vertices all belong to the tracked subset
    self.faces = tracker.flame.faces[(tracker.flame.faces < gaussian.flamelbs.vertices_num).all(dim=1)]
    self.static_offset = tracker.static_offset[:, :gaussian.flamelbs.vertices_num, :]
    
    self.max_pts_num = max_pts_num
    self.random_sample = random_sample
    self.prefix_remain = prefix_remain
    self.densify_method = ["split", "score"] if densify_method is None else densify_method
    
    self.binding_num = gaussian.binding_num
    # camera-scale schedule: epoch -> target scale, ramping 1 -> 100 over 20 epochs
    self.scheduler_camera_scaling = dict(enumerate(np.linspace(1, 100, 20)))
    gaussian.flamelbs.set_v_static(tracker.shape, self.static_offset)
    
  def training_setup(self, 
                     phase: Literal["coarse", "fine", "deform", "flame"]
                     ):
    """Build optimizer/scheduler parameter groups for one training phase.

    coarse: point positions/scales plus the static deformer sub-networks.
    fine:   every per-Gaussian attribute.
    deform: the dynamic (expression/audio-driven) deformer sub-networks.
    flame:  the FLAME tracker coefficients themselves.

    Raises:
      ValueError: if `phase` is not one of the four known phases.
        (Previously an unknown phase crashed later with an opaque NameError
        because `groups` was never assigned.)
    """
    gaussian = self.gaussian
    tracker = self.tracker
    if phase == "fine":
      groups = [
        {'params': [gaussian._xyz],           'lr': self._init_lr * 1. , "name": "xyz"},
        {'params': [gaussian._features_dc],   'lr': self._init_lr * 1. , "name": "f_dc"},
        {'params': [gaussian._features_rest], 'lr': self._init_lr / 20 , "name": "f_rest"},
        {'params': [gaussian._opacity],       'lr': self._init_lr * 1. , "name": "opacity"},
        {'params': [gaussian._scaling],       'lr': self._init_lr * 1. , "name": "scaling"},
        {'params': [gaussian._rotation],      'lr': self._init_lr * 1. , "name": "rotation"}
      ]  
    elif phase == "coarse":
      groups = [
        {'params': [gaussian._xyz], 
             'lr': self._init_lr / 1e0 , "name": "xyz"},
        {'params': [gaussian._scaling], 
             'lr': self._init_lr / 1e1 , "name": "scaling"},
        {'params': [v for k, v in self.deformer.named_parameters() if k.startswith("static_")], 
             'lr': self._init_lr / 1e0 , "name": "static"},
        {'params': [v for k, v in self.deformer.named_parameters() if k.startswith("triplane")], 
             'lr': self._init_lr / 1e0 , "name": "triplane"},
      ]
    elif phase == "deform":
      groups = [
        {'params': [v for k, v in self.deformer.named_parameters() if k.startswith("mlp_")], 
             'lr': self._init_lr / 1e0 , "name": "encode"},
        {'params': [v for k, v in self.deformer.named_parameters() if k.startswith("deform_")], 
             'lr': self._init_lr / 1e0 , "name": "deform"},
        {'params': [v for k, v in self.deformer.named_parameters() if k.startswith("transformer")], 
             'lr': self._init_lr / 1e0 , "name": "transform"},
      ]
    elif phase == "flame":
      groups = [
        {'params': [tracker.shape],         'lr': self._init_lr * 1e-1, "name": "shape"},
        {'params': [tracker.expr],          'lr': self._init_lr * 1e-1, "name": "expr"},
        {'params': [tracker.rotation],      'lr': self._init_lr * 1e-1, "name": "rotation"},
        {'params': [tracker.neck_pose],     'lr': self._init_lr * 1e-1, "name": "neck"},
        {'params': [tracker.jaw_pose],      'lr': self._init_lr * 1e-1, "name": "jaw"},
        {'params': [tracker.eyes_pose],     'lr': self._init_lr * 1e-1, "name": "eyes"},
      ]
    else:
      raise ValueError(f"unknown training phase: {phase!r}")
    self.learning_groups = groups
    # tiny eps keeps Adam stable for near-zero second moments — presumably
    # following the original 3DGS recipe; confirm before changing
    self.optimizer = torch.optim.Adam(groups, eps=1e-15)
    self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=500, eta_min=self._init_lr/1e2) 
  
  def reset_grad_accum(self):
    """Reset per-point gradient/radius accumulators and per-region losses.

    Called after every rasterizer rebuild and after each densification step so
    the statistics only reflect the current point set.
    """
    device = self.device
    with torch.no_grad():
      # fresh leaf tensor the rasterizer writes screen-space gradients into
      self.screen_pts = torch.zeros_like(self.gaussian.xyz).detach().requires_grad_(True)
      self.xyz_gradient_accum = torch.zeros((self.gaussian.active_pts_num, 1), device=device)
      self.xyz_gradient_store = torch.zeros((self.gaussian.active_pts_num, 1), device=device)
      self.denom = torch.ones((self.gaussian.active_pts_num, 1), device=device) # start at 1 to avoid zero denominators
      self.max_radii2D = torch.ones((self.gaussian.active_pts_num, 1), device=device)
      # accumulated photometric error per semantic-region id (see forward / sample_score)
      self.region_loss: Dict[int, float] = {
        1: 0., # boundary / edge
        2: 0., # face
        3: 0., # hair
        4: 0., # neck
        5: 0., # mouth
        6: 0., # teeth
        7: 0., # eyeball
        8: 0., # pupil
      }
    
  def oneupSHdegree(self):
    """Raise the active SH degree by one, capped at the model's maximum."""
    next_degree = self.active_sh_degree + 1
    if next_degree <= self.gaussian.max_sh_degree:
      self.active_sh_degree = next_degree
    
  def update_dr_rasterizer(self):
    """(Re)create the nvdiffrast context plus its view/projection matrices.

    On any failure the trainer degrades gracefully: `_use_dr` is cleared and
    training continues without mesh-normal / alpha supervision.
    """
    try:
      self.rasterizer_dr = dr.RasterizeCudaContext()
    except Exception:  # dr may be unimported or CUDA init may fail — degrade, don't crash
      self._use_dr = False
      self._logger.error("initialize nvdiffrast_cuda failed, train without face normals and alpha maps")
      return
    device = self.device
    height, width = self.tracker.image_size
    # view matrix (row-vector convention: v @ viewmat)
    R = self.tracker.RT[:3, :3].clone().T
    T = self.tracker.RT[:3, 3].clone()
    viewmat = torch.eye(4, 
                        device=device, 
                        dtype=torch.float32)
    viewmat[:3, :3] = R
    viewmat[3, :3] = T
    # projection matrix (OpenGL-style clip space for nvdiffrast)
    f_value = self.tracker.focal_length * max(height, width)
    cx, cy = torch.tensor([[0.5*width], [0.5*height]]).to(f_value)
    intrinsic = torch.stack([f_value, f_value, cx, cy], dim=1)
    projmat = projection_from_intrinsics(intrinsic, (height, width), style="opengl")[0].T.to(device)
    self.mat_dr = TransformMat(viewmat.data, 
                               projmat.data, 
                               (viewmat @ projmat).data
                               )
    
  @property
  def _camera_scale(self):
    # convenience proxy — the canonical camera scale lives on the Gaussian model
    return self.gaussian._camera_scale
    
  @property
  def active_pts_num(self):
    # number of currently active Gaussian points (proxied from the model)
    return self.gaussian.active_pts_num
    
  @torch.no_grad()
  def nvdrast_normal(self, 
                     vtx, 
                     tri=None, 
                     is_flip=True
                     ) -> Tuple[torch.Tensor, torch.Tensor]:
    """Render per-pixel mesh normals and a coverage channel via nvdiffrast.

    Args:
      vtx: (B, N, 3) mesh vertices in world space.
      tri: (F, 3) int triangle indices; defaults to this trainer's FLAME faces.
      is_flip: flip vertically (nvdiffrast images have a bottom-left origin).

    Returns:
      (normals, alpha): normals is (B, 3, H, W) unit camera-space normals with
      x/z negated to match the GS convention; alpha is (B, 1, H, W) — callers
      treat values > 0 as coverage. Returns None when dr is unavailable.
    """
    if not self._use_dr:
      self._logger.error("failed to get dr context, please train without face normals and alpha maps")
      return
    height, width = self.tracker.image_size
    vtx_homo = F.pad(vtx, [0, 1], value=1.0)
    vtx_cam = vtx_homo @ self.mat_dr.viewmat 
    vtx_clip = vtx_cam @ self.mat_dr.projmat
    tri = tri if tri is not None else self.faces.int()
    rast_out, rast_out_db = dr.rasterize(self.rasterizer_dr, vtx_clip, tri, (height, width))
    if is_flip:
      rast_out = rast_out.flip(1) # shape B, H, W, 4
    v_now = vtx_cam[..., :3] # B, N, 3
    normal_attr = compute_v_normals(v_now, tri)
    normals, _ = dr.interpolate(normal_attr, rast_out, tri)
    normals = F.normalize(normals, dim=-1).permute(0, 3, 1, 2)
    normals.data[:, :, [0, 2]] *= -1 # align with the GS coordinate convention
    # NOTE(review): channel -2 of rast_out is presumably the depth-like (z/w)
    # channel rather than the triangle-id mask — confirm against nvdiffrast docs
    alpha = rast_out[..., -2].unsqueeze(1).detach()
    return normals, alpha
    
  def update_gs_rasterizer(self, 
                           R:torch.Tensor = None,
                           ):
    """(Re)build the Gaussian-splatting rasterizer and its transform matrices.

    Args:
      R: optional 3x3 rotation override; defaults to the tracker's camera
         rotation. Also resets the gradient accumulators, since they must
         match the new camera / point set.
    """
    device = self.device
    viewmat_trans = torch.tensor(
      [
        [1,  0,  0,],
        [0, -1,  0,],
        [0,  0, -1,],
      ], # rotate the camera view 180 deg about the y and z axes (1. nvdiffrast looks down -z; 2. nvdiffrast renders with a bottom-left image origin)
      dtype=torch.float32,
    )
    height, width = self.tracker.image_size
    # view matrix
    if R is None:
      R = self.tracker.RT[:3, :3].clone().T 
    T = self.tracker.RT[:3, 3].clone() * self._camera_scale # pull the camera back: [0, 0, cam_scale]
    viewmat = torch.eye(4, 
                        device=device, 
                        dtype=torch.float32)
    viewmat[3, :3] = -T # campos
    viewmat[:3, :3] = R @ viewmat_trans.to(R) # w2c
    # projection matrix
    f_value = self.tracker.focal_length * max(height, width) # focal_length=1.5
    cx, cy = torch.tensor([[0.5*width], [0.5*height]]).to(f_value)
    intrinsic = torch.stack([f_value, f_value, cx, cy], dim=1)
    projmat = projection_from_intrinsics(intrinsic, (height, width), style="directx")[0].T.to(device)
    # gs rasterizer — accumulators are reset so they match the new camera
    self.reset_grad_accum()
    self.mat_gs = TransformMat(viewmat.data, 
                               projmat.data, 
                               (viewmat @ projmat).data
                               )
    self.rasterizer_gs = get_rasterizer(
            viewmat, projmat, (height, width), sh_degree=self.active_sh_degree, device=device
          )    
    
  @torch.no_grad()
  def add_densification_stats(self, 
                              viewspace_point_tensor: torch.Tensor, 
                              radiis: torch.Tensor,
                              ):
    """Accumulate per-point screen-space gradient and radius statistics.

    Visible points (radius > 0) update `max_radii2D` and the visit counter;
    the screen-space gradient (if populated by backward) feeds the xy-norm
    and z accumulators, then is cleared for the next step.
    """
    if radiis.ndim != self.max_radii2D.ndim:
      radiis = radiis.unsqueeze(-1)
    visible = radiis > 0
    self.max_radii2D[visible] = torch.max(self.max_radii2D[visible], radiis[visible])
    self.denom[visible] += 1
    grad = viewspace_point_tensor.grad
    if grad is not None:
      self.xyz_gradient_accum += torch.norm(grad[:, :2], dim=-1, keepdim=True)
      self.xyz_gradient_store += grad[:, 2:]
      grad.zero_()
    
  def forward(self,
              shapes: torch.Tensor,
              exprs: torch.Tensor,
              rotations: torch.Tensor,
              necks: torch.Tensor,
              jaws: torch.Tensor,
              eyess: torch.Tensor,
              translations: torch.Tensor,
              gt_images: torch.Tensor, # ground-truth RGB frames
              phase:Literal["coarse", "fine", "deform", "flame"]="coarse",
              avefeat:torch.Tensor=None,
              ):
    """Render one batch through the Gaussian model and compute all losses.

    The FLAME LBS model turns the coefficient tensors into mesh vertices;
    per-phase Gaussian parameters are derived from them, splatted one sample
    at a time with the shared rasterizer, and compared against `gt_images`
    (photometric, LPIPS, alpha, normal and regularization terms).

    Returns a dict of "loss/*" scalars plus detached render/gt buffers and
    `radii`. NOTE(review): `radii`, `scalings` and `opacities` used after the
    loop come from the LAST sample of the batch only — confirm intended.
    """
    gaussian = self.gaussian
    rend_images, rend_alphas, rend_normals, rend_dists, surf_normals, surf_depths = [], [], [], [], [], []
    B = shapes.size(0)
    vertices, v_transmat, joints =  gaussian.flamelbs.forward(shapes, exprs, rotations, necks, jaws, eyess, None, self.static_offset)
    if phase!="flame":
      with torch.no_grad():
        pass # placeholder: no extra preprocessing for non-flame phases yet
      
    if phase=="coarse":
      xyzs, scalings, quaternions, opacities, features = gaussian.get_gs_params_static(vertices, 
                                                                                        v_transmat, 
                                                                                        self.deformer,
                                                                                        translation = translations,
                                                                                        )
    elif phase=="fine":
      xyzs, scalings, quaternions, opacities, features = gaussian.get_gs_params(vertices, 
                                                                                v_transmat, 
                                                                                translation = translations,
                                                                                )
    elif phase=="deform":
      # conditioning features for the dynamic deformer
      extra_feats = {
        "shape": shapes,
        "expr": exprs,
        "global_pose": rotations,
        "neck_pose": necks,
        "jaw_pose": jaws,
        "eye_pose": eyess,
        "ave": avefeat
      }
      xyzs, scalings, quaternions, opacities, features = gaussian.get_gs_params_deform(vertices, 
                                                                                        v_transmat, 
                                                                                        self.deformer,
                                                                                        extra_feats=extra_feats,
                                                                                        translation = translations,
                                                                                        )
    elif phase=="flame":
      xyzs, scalings, quaternions, opacities, features = gaussian.get_gs_params(vertices, 
                                                                                v_transmat, 
                                                                                translation = translations,
                                                                                )
    else:
      raise NotImplementedError("Phase must be 'coarse', 'fine' or 'deform'.")
    
    # splat each sample of the batch individually
    for xyz, scaling, quaternion, opacity, feature, rotation, neck in zip(xyzs, scalings, quaternions, opacities, features, rotations, necks):
      rend_image, radii, allmap = self.rasterizer_gs(
              means3D = xyz,
              means2D = self.screen_pts,
              shs = feature if gaussian.output_shs else None,
              colors_precomp = feature if not gaussian.output_shs else None,
              opacities = opacity,
              scales = scaling,
              rotations = quaternion, # w x y z 
              cov3D_precomp = None)
      maps = get_from_allmap(allmap, 
                            self.mat_gs.viewmat, 
                            self.mat_gs.fullmat, 
                            self.tracker.image_size)
      rend_images.append(rend_image)
      rend_alphas.append(maps["rend_alpha"]) # 1, H, W
      rend_normals.append(maps["rend_normal"]) # 3, H, W
      rend_dists.append(maps["rend_dist"]) # 1, H, W
      surf_normals.append(maps["surf_normal"]) # 3, H, W
      surf_depths.append(maps["surf_depth"]) # 1, H, W
    rend_images, rend_alphas, rend_normals, rend_dists, surf_normals, surf_depths = (
        torch.stack(x, dim=0) for x in [rend_images, rend_alphas, rend_normals, rend_dists, surf_normals, surf_depths]
    )
      
    if self._use_dr:
      # mesh-rendered supervision; gt_depth is nvdrast_normal's second return
      # (treated here as coverage: > 0 means the pixel is on the mesh)
      gt_normals, gt_depth = self.nvdrast_normal(vertices + translations[:, None, :])
      gt_alphas = (gt_depth > 0).float()
      loss_normal = (gt_alphas * (surf_normals - gt_normals)).square().mean()
      loss_alpha = F.binary_cross_entropy(rend_alphas, gt_alphas)
    else:
      gt_alphas = torch.ones_like(rend_images)
      loss_normal = loss_alpha = torch.tensor(0.) # 0-dim CPU scalars; safe to mix with CUDA losses
    # TODO: add an SSIM term
    if self._gt_blur_kernel > 0:
      gt_images = gaussian_blur(gt_images, kernel_size=self._gt_blur_kernel, sigma=self._gt_blur_sigma)
    loss_lpips = lpips_model(gt_alphas * rend_images, gt_alphas * gt_images).mean()
    image_error = (rend_images - gt_images).abs()
    loss_image = ((gt_alphas * image_error).sum(dim=(1, 2, 3)) / gt_alphas.sum(dim=(1, 2, 3))).mean()
    # consistency between splatted normals and depth-derived surface normals
    loss_normal_error = (1 - (rend_normals * surf_normals).sum(dim=1)).mean()
    loss_rend_dist = rend_dists.mean()
    scalings_size = scalings[..., :2].max(dim=-1).values # using the norm instead tends to produce thin, elongated kernels
    reg_scaling = scalings_size.mean() # keep kernels uniform and near-circular; circular kernels also ease static-model training
    # reg_opacity = ((1 - opacities) * opacities).mean()
    reg_opacity = opacities.mean()
    reg_offsets = gaussian._xyz.norm(dim=-1).mean()
    # extra: accumulate per-region photometric error (consumed by sample_score)
    for rid in [1, 2, 3, 5, 7, 8]:
      loss_region = self.region_loss_l2(vertices, 
                                        image_error, 
                                        rid=rid)
      self.region_loss[rid] += loss_region.item()
    if phase == "flame":
      return {"loss/image": loss_image, 
              "loss/lpips": loss_lpips, 
              "loss/alpha": loss_alpha,
              "radii": radii,
              "rend_images": rend_images.data,
              "gt_images": gt_images.data,
              "gt_alphas": gt_alphas.data,
              }
    return {"loss/image": loss_image, 
            "loss/lpips": loss_lpips, 
            "loss/alpha": loss_alpha, 
            "loss/normal_cross": loss_normal_error, 
            "loss/normal_l2": loss_normal, # if the vhap-derived mesh is inaccurate, dropping this term works better
            "loss/distortion": loss_rend_dist,
            "loss/reg_scaling": reg_scaling,
            "loss/reg_opacity": reg_opacity,
            "loss/reg_offsets": reg_offsets,
            "radii": radii,
            "rend_images": rend_images.data,
            "gt_images": gt_images.data,
            "gt_alphas": gt_alphas.data,
            }
  
  def region_loss_l2(self, 
                     vtx: torch.Tensor,
                     error: torch.Tensor,
                     rid: int,
                     ):
    """Mean per-pixel `error` inside semantic region `rid`, summed over the batch.

    Args:
      vtx: (B, N, 3) mesh vertices used to place the region mask.
      error: (B, C, H, W) per-pixel error map.
      rid: semantic region id (see the keys documented in reset_grad_accum).
    """
    # get_binding_mask is defined elsewhere in this class; presumably it
    # rasterizes the region's bound faces into an image-space mask — TODO confirm
    mask = (self.get_binding_mask(vtx, rid) > 0).float()
    return ((mask * error).sum(dim=(1, 2, 3)) / mask.sum(dim=(1, 2, 3))).sum()
    
  
  def fit(self, 
          phase:Literal["coarse", "fine", "deform", "flame"], 
          batch_size:int = 16,
          max_iteration:int = 8000,
          audio_dataset:torch.Tensor = None,
          ave_encoder:nn.Module = None,
          ):
    """Run one training phase over the tracker's dataset.

    Args:
      phase: which parameter set to optimise (see `training_setup`).
      batch_size: frames per optimisation step.
      max_iteration: approximate total number of optimiser steps.
      audio_dataset: optional per-frame audio features, zipped with the video
        frames during the "deform" phase.
      ave_encoder: encoder mapping audio features to deformer conditioning.
    """
    # Setup before training
    self.batch_size = batch_size
    self.set_dir()
    if phase=="coarse" and not self.static_deform:
      phase = "fine"
    self.training_setup(phase)
    dataloader = DataLoader(self.tracker.dataset, batch_size=self.batch_size, shuffle=True, num_workers=1)
    if phase=="deform" and audio_dataset is not None:
      dataloader = DataLoader(ZipDataset(self.tracker.dataset, audio_dataset), batch_size=self.batch_size, shuffle=True, num_workers=1)
    loss_coef = {
      "loss/lpips": 1.0, 
      "loss/image": 1.0, 
      "loss/alpha": 0.5, 
      "loss/depth": 0.5,
      "loss/normal_cross": 0.5, 
      "loss/normal_l2": 0.5, 
      "loss/distortion": 1e-2,
      "loss/reg_scaling": 1e-2,
      "loss/reg_opacity": 1e-3,
      "loss/reg_offsets": 1e-1,
    }
    relocate_num = 0
    self._iteration = self._epoch * len(dataloader)
    max_epoch = max_iteration // len(dataloader) + 1
    lpips_model.to(self.device)
    img_out_folder = (self._directory / "image").expanduser().resolve()
    img_out_folder.mkdir(parents=True, exist_ok=True)
    self.update_dr_rasterizer()
    self.update_gs_rasterizer()
    # Start training
    for _ in range(max_epoch):
      progress = tqdm.tqdm(iter(dataloader))
      loss_epoch = 0.
      ave_feats = None
      
      # Camera-scale schedule (coarse phase only).
      # BUGFIX: the membership test used `self._epoch` while the lookup used
      # the local loop counter; on resumed training (`self._epoch` > 0) the
      # two diverge and the lookup raised KeyError. Use `self._epoch` for both.
      if self._epoch in self.scheduler_camera_scaling and phase=="coarse":
        to_scale = self.scheduler_camera_scaling[self._epoch]
        up_ratio = to_scale / self.gaussian._camera_scale
        self.gaussian.camera_scale_up(up_ratio)
        self.deformer.set_scale(to_scale)
        self.update_gs_rasterizer()
        
      for idx, samples in enumerate(progress):
        if isinstance(samples, (tuple, list)): # ZipDataset yields (frame, audio)
          samples, aud_feats = samples
          with torch.no_grad(): 
            aud_feats = aud_feats.to(self.device)
            ave_feats = ave_encoder(aud_feats)
        idxs: torch.Tensor = samples["timestep_index"].squeeze(1)
        batchsize = idxs.size(0)
        gt_images = samples["rgb"].to(self.device).squeeze(1)
        
        # gather the tracked FLAME coefficients for this batch of frames
        shapes = self.tracker.shape[None].repeat(batchsize, 1).to(self.device)
        exprs = self.tracker.expr[idxs].to(self.device)
        rotations = self.tracker.rotation[idxs].to(self.device)
        necks = self.tracker.neck_pose[idxs].to(self.device)
        jaws = self.tracker.jaw_pose[idxs].to(self.device)
        eyess = self.tracker.eyes_pose[idxs].to(self.device)
        translations = self.tracker.translation[idxs].to(self.device)
        if phase != "flame":
          with torch.no_grad():
              pass # placeholder: no extra preprocessing yet
        
        outputs = self.forward(shapes, exprs, rotations, necks, jaws, eyess, translations, gt_images, phase, ave_feats)
        # weighted sum of every "loss/*" entry (unknown keys default to 1.0)
        loss = 0.
        for k in outputs:
          if k.startswith("loss/"):
            loss += loss_coef.get(k, 1.0) * outputs[k]
        loss_epoch += loss.item()
        loss.backward()
        self._iteration += 1
      
        if self._iteration % 1000 == 0:
          self.oneupSHdegree()
          
        with torch.no_grad():
          self.add_densification_stats(self.screen_pts, radiis=outputs["radii"])
          self.optimizer.step()
          self.optimizer.zero_grad()
          lr = self._init_lr
          if self.scheduler is not None:
            self.scheduler.step()
            lr = self.scheduler.get_last_lr()[0]
                      
          if self._iteration % self._densify_interval == 0:
            opa = self.gaussian.opacity.squeeze(-1)
            scaling_size = self.gaussian.scaling.min(dim=-1).values
            logscale = scaling_size.log()
            # points are "dead" when unusually small or nearly transparent
            dead_mask = torch.logical_or(logscale < logscale.mean() - 2 * logscale.std(), opa < self._dead_opacity)
            relocate_num = self.relocate_gs(dead_mask=dead_mask)
            
            increasable_num = self.max_pts_num - self.gaussian.active_pts_num
            if increasable_num > 0 and phase not in ["deform", "flame"]:
              # linear schedule that reaches max_pts_num at half of max_iteration
              linear_r = max((2 * self.max_pts_num * self._iteration / max_iteration) / self.gaussian.active_pts_num, 1.0) - 1
              inc_r = min((increasable_num * self._dynamic_incr) / self.gaussian.active_pts_num, linear_r) / len(self.densify_method)
              
              if "score" in self.densify_method:
                self.add_new_gs(inc_r)
              
              if "split" in self.densify_method:
                mask = scaling_size >= scaling_size.quantile(1 - inc_r) # densify the largest points
                self.split_gs(mask)
                
            self.reset_grad_accum()
          
        if self._tb_writer is not None:
          self._tb_writer.add_scalar('metric/ptsnum', self.active_pts_num, self._iteration)
          self._tb_writer.add_scalar('metric/lr', lr, self._iteration)
          self._tb_writer.add_scalar('metric/reclocatenum', relocate_num, self._iteration)
          if "opa" in locals(): # opa/scaling_size only exist after the first densify step
            mask = scaling_size <= scaling_size.quantile(0.1)
            self._tb_writer.add_scalar('metric/opa_min_avg', opa.quantile(0.1).mean().item(), self._iteration)
            self._tb_writer.add_scalar('metric/opa_max_avg', opa.quantile(0.9).mean().item(), self._iteration)
            self._tb_writer.add_scalar('metric/opa_sparse', opa.std().item() , self._iteration)
            self._tb_writer.add_scalar('metric/scale_min_avg', scaling_size.quantile(0.1).mean().item(), self._iteration)
            self._tb_writer.add_scalar('metric/scale_max_avg', scaling_size.quantile(0.9).mean().item(), self._iteration)
            self._tb_writer.add_scalar('metric/scale_min_opa', opa[mask].mean().item(), self._iteration)
            self._tb_writer.add_scalar('metric/scale_min_opa_max', opa[mask].max().item(), self._iteration)
          self._tb_writer.add_scalar('loss/all', loss.item(), self._iteration)
          grad_avg_dist: torch.Tensor = (self.xyz_gradient_accum / self.denom)
          if grad_avg_dist.sum() > 0:
            self._tb_writer.add_scalar('metric/grad_max_avg', grad_avg_dist.quantile(0.9).item(), self._iteration)
            self._tb_writer.add_scalar('metric/grad_min_avg', grad_avg_dist.quantile(0.1).item(), self._iteration)
          for k in outputs:
            if k.startswith("loss/"):self._tb_writer.add_scalar(f'{k}', outputs[k].item(), self._iteration)
          
        progress.set_description(f'epoch: {self._epoch}, phase:{phase}, loss: {loss_epoch / (idx + 1):.3f}, ptsnum: {self.active_pts_num}, camera_scale: {self._camera_scale:.3f}, lr: {lr:.3f}')
      
      # save a side-by-side (masked gt | render) preview for this epoch
      vt.save([outputs["gt_images"][0] * outputs["gt_alphas"][0], outputs["rend_images"][0]], img_out_folder / f"{self._epoch}.png")
      self._epoch += 1
      if self._epoch % 10 == 0:
        self.save_auto()
      torch.cuda.empty_cache()
    self.save_auto() # END of fitting
        
  def replace_tensors_to_optimizer(self, inds=None):
    """Re-wrap the current gaussian attribute tensors as optimizer parameters.

    With `inds`, only the Adam moments at those rows are zeroed (used after an
    in-place relocation); otherwise the whole per-parameter state
    (step / exp_avg / exp_avg_sq) is reset. Returns the re-wrapped parameters
    keyed by group name.
    """
    gaussian = self.gaussian
    tensors_dict = {
        "xyz": gaussian._xyz,
        "f_dc": gaussian._features_dc,
        "f_rest": gaussian._features_rest,
        "opacity": gaussian._opacity,
        "scaling" : gaussian._scaling,
        "rotation" : gaussian._rotation
        }

    optimizable_tensors = {}
    for group in self.optimizer.param_groups:
      # multi-tensor groups (deformer/tracker module groups) are not gaussian
      # attributes — skip them
      if len(group["params"]) > 1:
        continue
      # BUGFIX: also skip single-tensor groups whose name is not a gaussian
      # attribute (e.g. "shape"/"expr" in the flame phase) — previously these
      # raised KeyError below; mirrors the guard in cat_tensors_to_optimizer.
      if group["name"] not in tensors_dict:
        continue
      tensor = tensors_dict[group["name"]]
      stored_state = self.optimizer.state.get(group['params'][0], None)
      
      if stored_state is not None:
        if inds is not None:
          # zero only the first/second moments of the relocated rows
          stored_state["exp_avg"][inds] = 0
          stored_state["exp_avg_sq"][inds] = 0
        else:
          stored_state['step'] = torch.zeros_like(stored_state['step'])
          stored_state["exp_avg"] = torch.zeros_like(tensor)
          stored_state["exp_avg_sq"] = torch.zeros_like(tensor)

        # re-key the preserved state onto a freshly wrapped parameter
        del self.optimizer.state[group['params'][0]]
        group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
        self.optimizer.state[group['params'][0]] = stored_state
        
      else:
        pass # no optimizer state yet (parameter never stepped) — nothing to reset

      optimizable_tensors[group["name"]] = group["params"][0]
    gaussian._xyz = optimizable_tensors.get("xyz", gaussian._xyz)
    gaussian._features_dc = optimizable_tensors.get("f_dc", gaussian._features_dc)
    gaussian._features_rest = optimizable_tensors.get("f_rest", gaussian._features_rest)
    gaussian._opacity = optimizable_tensors.get("opacity", gaussian._opacity)
    gaussian._scaling = optimizable_tensors.get("scaling", gaussian._scaling)
    gaussian._rotation = optimizable_tensors.get("rotation", gaussian._rotation)

    torch.cuda.empty_cache()
    return optimizable_tensors
  
  def cat_tensors_to_optimizer(self, tensors_dict):
    """Concatenate new rows onto the matching optimizer parameters.

    For each param group named in `tensors_dict`, the group's parameter is
    replaced by cat(old, new) and the Adam moments for the appended rows start
    at zero. Returns the new parameters keyed by group name.
    """
    optimizable_tensors = {}
    for group in self.optimizer.param_groups:
      # rule out parameters that are not properties of gaussians
      if group["name"] not in tensors_dict:
        continue
      
      assert len(group["params"]) == 1
      extension_tensor = tensors_dict[group["name"]]
      stored_state = self.optimizer.state.get(group['params'][0], None)
      if stored_state is not None:

        # extend the Adam moments with zeros for the appended rows
        stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
        stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)

        # re-key the preserved state onto the freshly concatenated parameter
        del self.optimizer.state[group['params'][0]]
        group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
        self.optimizer.state[group['params'][0]] = stored_state

        optimizable_tensors[group["name"]] = group["params"][0]
      else:
        # no optimizer state yet — just replace the parameter
        group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
        optimizable_tensors[group["name"]] = group["params"][0]

    return optimizable_tensors
  
  def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
    """Append newly created Gaussians to the model and the optimizer state.

    Attributes whose parameter group exists in the current optimizer are taken
    from `cat_tensors_to_optimizer`; the rest are concatenated directly.
    """
    gaussian = self.gaussian
    d = {"xyz": new_xyz,
    "f_dc": new_features_dc,
    "f_rest": new_features_rest,
    "opacity": new_opacities,
    "scaling" : new_scaling,
    "rotation" : new_rotation}

    optimizable_tensors = self.cat_tensors_to_optimizer(d)
    # BUGFIX(perf): the previous code passed the fallback Parameter as
    # dict.get's default, so a concatenated tensor was allocated even when the
    # optimizer already provided one; build the fallback lazily instead.
    for key, attr in (("xyz", "_xyz"),
                      ("f_dc", "_features_dc"),
                      ("f_rest", "_features_rest"),
                      ("opacity", "_opacity"),
                      ("scaling", "_scaling"),
                      ("rotation", "_rotation")):
      merged = optimizable_tensors.get(key)
      if merged is None:
        merged = nn.Parameter(torch.cat((getattr(gaussian, attr), d[key]), dim=0), requires_grad=True)
      setattr(gaussian, attr, merged)
    
  # sampling weight used by densification / relocation
  @property
  def sample_score(self):
    """Per-point sampling weight: opacity times smallest scale, boosted by the
    average accumulated photometric error of each point's semantic region."""
    gaussian = self.gaussian
    weight = gaussian.opacity.data[:, 0] * gaussian.scaling.data.min(dim=-1).values
    for rid in [1, 2, 3, 4, 5, 7, 8]:
      region_idx = self.get_binding_kernel(rid)
      weight[region_idx] *= self.region_loss[rid] / region_idx.size(0)
    return weight.squeeze(-1)
    
  # (pseudo-)sampling of indices proportional to the given weights
  def sample_alives(self, 
                     probs: torch.Tensor,
                     num: int,
                     alive_indices: torch.Tensor=None,
                     ):
    """Draw `num` indices with replacement, proportional to `probs`;
    optionally remap the draws through `alive_indices`.

    Raises:
      ValueError: if the normalised weights are negative, NaN or infinite,
        or if a drawn index falls outside the weight vector.
    """
    eps = torch.finfo(torch.float32).eps
    normed = probs / (probs.sum() + eps)
    invalid = (normed < 0).any() or normed.isnan().any() or normed.isinf().any()
    if invalid:
      raise ValueError
    picked = torch.multinomial(normed, num, replacement=True)
    if picked.max() >= normed.size(0):
      raise ValueError
    return picked if alive_indices is None else alive_indices[picked]
    
  def clone_params(self, idxs:torch.Tensor):
    """Gather the attribute tensors of the points at `idxs`.

    Used to seed relocated Gaussians; opacity is reset to zero so the new
    points fade in instead of popping. (Removed locals `num`/`new_num` and the
    commented-out `clone_from_kcent` calls that were the only users of them.)
    """
    gaussian = self.gaussian
    new_xyz = gaussian._xyz[idxs]
    new_dc = gaussian._features_dc[idxs]
    new_rest = gaussian._features_rest[idxs]
    new_opacity = torch.zeros_like(gaussian._opacity[idxs]) # re-initialise opacity at zero
    new_scaling = gaussian._scaling[idxs]
    new_rotation = gaussian._rotation[idxs]
    return new_xyz, new_dc, new_rest, new_opacity, new_scaling, new_rotation
  
  def clone_from_kcent(self,
                         feature: torch.Tensor,
                         sample_idx: torch.Tensor,
                         ) -> torch.Tensor:
    """Sample new features from the per-face mean/std of `feature`.

    Each sampled point receives the average feature of the face it is bound
    to, plus Gaussian noise scaled by `_init_lr` times the per-face std.

    BUGFIX: the variance used to be computed as (sum of deviations)^2 / n,
    which is identically ~0 because deviations from the mean sum to zero per
    face; it now accumulates squared deviations (biased variance sum(d^2)/n),
    so the noise term is no longer degenerate.
    """
    k = feature.size(1)
    binding = self.gaussian.binding
    scatter_idx = binding.unsqueeze(-1).expand(-1, k)
    new_binding = binding[sample_idx]
    # per-face mean
    face_feature_sum = torch.zeros((self.binding_num, k), device=self.device)
    face_feature_sum.scatter_add_(0, scatter_idx, feature)
    face_feature_avg = face_feature_sum / self.gaussian.binding_counter[:, None]
    # per-face (biased) variance: accumulate squared deviations, then divide
    face_feature_sq = torch.zeros((self.binding_num, k), device=self.device)
    face_feature_sq.scatter_add_(0, scatter_idx, (feature - face_feature_avg[binding]).square())
    face_feature_var = face_feature_sq / self.gaussian.binding_counter[:, None]
    
    new_feature_avg = face_feature_avg[new_binding]
    new_feature_noise = self._init_lr * face_feature_var[new_binding].sqrt() * torch.randn_like(new_feature_avg)
    return new_feature_avg + new_feature_noise
  
  def relocate_gs(self,
                  dead_mask: torch.Tensor = None
                  ):
    """Re-initialise dead Gaussians at the parameters of sampled alive ones.

    Dead points are hidden (raw opacity forced negative), then overwritten
    with clones of alive points chosen by ``self.sample_score`` — either by
    probability sampling or by score rank, depending on ``self.random_sample``.

    Args:
      dead_mask: boolean mask over all points marking dead ones. NOTE: the
        mask is mutated in place when ``self.prefix_remain`` protects the
        original prefix points.

    Returns:
      Number of relocated points (0 if nothing to do).
    """
    if dead_mask is None or dead_mask.sum() == 0:
      return 0
    gaussian = self.gaussian
    # Force dead opacity strongly negative so those points stop rendering.
    gaussian._opacity.data[dead_mask] = -5.
    if self.prefix_remain:
      # Never relocate the original prefix points.
      dead_mask[:gaussian.binding_num] = 0
    if dead_mask.sum() == 0:
      return 0

    alive_mask = ~dead_mask
    dead_indices = dead_mask.nonzero(as_tuple=True)[0]
    alive_indices = alive_mask.nonzero(as_tuple=True)[0]

    if alive_indices.shape[0] <= 0:
      return 0

    probs = self.sample_score[alive_indices]

    reinit_num = dead_indices.shape[0]
    if self.random_sample:
      reinit_idx = self.sample_alives(alive_indices=alive_indices, probs=probs, num=reinit_num)
    else:
      # BUGFIX: argsort yields positions WITHIN `alive_indices`; map them back
      # to global point indices, as the random branch already does.
      reinit_idx = alive_indices[probs.argsort(descending=True)[:reinit_num]]
    relocate_binding = gaussian.binding[reinit_idx]

    (
      gaussian._xyz[dead_indices],
      gaussian._features_dc[dead_indices],
      gaussian._features_rest[dead_indices],
      gaussian._opacity[dead_indices],
      gaussian._scaling[dead_indices],
      gaussian._rotation[dead_indices]
      ) = self.clone_params(reinit_idx)

    gaussian.binding[dead_indices] = relocate_binding
    self.replace_tensors_to_optimizer(inds=dead_indices)
    if self._add_noise:
      # BUGFIX: previous code concatenated a boolean mask with index tensors
      # (dtype mismatch). Perturb the sampled sources and the relocated
      # points, mirroring add_new_gs.
      self.add_noise(torch.cat([reinit_idx, dead_indices]))
    return reinit_num
      
  def add_new_gs(self, inc_ratio):
    """Grow the point set by up to ``inc_ratio``, capped at ``max_pts_num``.

    New points are cloned from existing ones chosen by ``self.sample_score``
    (probability sampling or score rank, per ``self.random_sample``) and
    appended at the end of every parameter tensor.

    Returns the number of points actually added.
    """
    gaussian = self.gaussian
    n_active = self.active_pts_num
    n_target = min(self.max_pts_num, int((1 + inc_ratio) * n_active))
    n_added = max(0, n_target - n_active)
    if n_added <= 0:
      return 0

    # Densification score (e.g. grad/radii based) doubles as the sampling
    # distribution.
    score = self.sample_score
    if score.sum() == 0:
      return 0

    if self.random_sample:
      src_idx = self.sample_alives(probs=score, num=n_added)
    else:
      src_idx = score.argsort(descending=True)[:n_added]
    old_size = gaussian.binding.size(0)
    appended_idx = torch.arange(old_size, old_size + n_added, device=self.device)

    new_xyz, new_dc, new_rest, new_opacity, new_scaling, new_rotation = self.clone_params(src_idx)

    # Extend the face binding first, then append the cloned parameters.
    gaussian.binding.data = torch.cat([gaussian.binding, gaussian.binding[src_idx]], dim=0)
    self.densification_postfix(new_xyz, new_dc, new_rest, new_opacity, new_scaling, new_rotation)
    self.replace_tensors_to_optimizer(inds=src_idx)
    if self._add_noise:
      self.add_noise(torch.cat([src_idx, appended_idx]))
    return n_added
            
  def split_gs(self,
                mask: torch.Tensor,
                split_num: int = 2
                ):
    """Split each masked Gaussian into ``split_num`` smaller copies.

    The original point is shrunk in place and ``split_num - 1`` copies are
    appended, all with scaling reduced by ``sqrt(split_num)`` so the total
    kernel extent is roughly preserved.

    Args:
      mask: boolean mask over all points selecting the ones to split.
      split_num: total number of pieces each selected point becomes.

    Returns:
      Number of appended points (0 if the mask is empty — made consistent
      with relocate_gs/add_new_gs, which also return counts).
    """
    gaussian = self.gaussian
    num_org = mask.sum()
    if num_org <= 0:
      return 0

    idxs = mask.nonzero(as_tuple=True)[0].repeat(split_num - 1)
    num_new = idxs.size(0)
    new_idxs = torch.arange(gaussian.binding.size(0), gaussian.binding.size(0) + num_new, device=self.device)

    new_xyz = gaussian._xyz[idxs]
    new_dc = gaussian._features_dc[idxs]
    new_rest = gaussian._features_rest[idxs]
    new_opacity = gaussian._opacity[idxs]
    # Shrink the split kernels so their combined footprint matches the original.
    new_scaling = gaussian.inverse_scaling_activation(gaussian.scaling[idxs] / np.sqrt(split_num))
    gaussian._scaling.data[idxs[:num_org]] = new_scaling[:num_org]  # shrink the originals in place
    new_rotation = gaussian._rotation[idxs]

    gaussian.binding.data = torch.cat([gaussian.binding, gaussian.binding[idxs]], dim=0)
    self.densification_postfix(new_xyz, new_dc, new_rest, new_opacity, new_scaling, new_rotation)
    self.replace_tensors_to_optimizer(inds=idxs.unique())
    if self._add_noise:
      self.add_noise(torch.cat([idxs, new_idxs]))
    return num_new
  
  @staticmethod
  def op_sigmoid(x, k=100, x0=0.995):
    return 1 / (1 + torch.exp(-k * (x - x0)))
  
  def add_noise(self, idx=None, lr=1e-3):
    """Perturb selected point positions with anisotropic, opacity-gated noise.

    The noise is drawn isotropically, gated by ``op_sigmoid`` so near-opaque
    points receive almost none, then mapped through each Gaussian's
    L·Lᵀ matrix so it follows the kernel's shape. (The original comment
    called this Σ⁻¹; L·Lᵀ is the covariance itself — verify intent.)

    Args:
      idx: 1-D long tensor of point indices to perturb; defaults to all.
      lr: noise scale.
    """
    gaussian = self.gaussian
    if idx is None:
      idx = torch.arange(gaussian._xyz.shape[0], device=gaussian._xyz.device)
    # Pad the 2-D log-scales with a zero third axis to build a 3-D transform.
    scaling_3d = torch.concat([gaussian.scaling[idx], torch.zeros((idx.size(0), 1), device=gaussian.scaling.device)], dim=-1)
    L = build_scaling_rotation(scaling_3d, gaussian.rotation[idx])
    actual_covariance = L @ L.transpose(1, 2)
    noise = torch.randn_like(gaussian._xyz[idx]) * (self.op_sigmoid(1 - gaussian.opacity[idx])) * lr
    noise = torch.bmm(actual_covariance, noise.unsqueeze(-1)).squeeze(-1)
    # BUGFIX: `_xyz[idx].add_(noise)` mutated a temporary copy produced by
    # advanced (tensor) indexing, so the noise never reached the parameter.
    # index_add_ on .data writes through (and accumulates correctly when idx
    # contains duplicates from multinomial sampling with replacement).
    gaussian._xyz.data.index_add_(0, idx, noise)
  
  def get_binding_kernel(self, region_idx):
    """Boolean mask over Gaussians bound within the given FLAME region(s).

    Args:
      region_idx: region id tensor matched against the face-to-class map.

    Returns:
      Boolean tensor over ``gaussian.binding`` marking points in the region.
    """
    flame = self.tracker.flame
    # NOTE(review): 10174 looks like the face count of the base FLAME
    # topology (excluding any appended faces) — confirm against the mesh.
    region_faces = flame.faces[torch.isin(flame.mask.fid2cid[:10174, ...], region_idx)]
    region_verts = region_faces.unique()
    region_verts = region_verts[region_verts < self.gaussian.binding_num]
    return torch.isin(self.gaussian.binding, region_verts)
  
  def get_binding_mask(self,
                       vtxs: torch.Tensor,  # B, V, 3
                       region_idx: Union[int, torch.Tensor] # F, 3
                       ) -> torch.Tensor:
    """Rasterise a binary image mask for the given FLAME region(s).

    Args:
      vtxs: (B, V, 3) posed vertex positions.
      region_idx: single region id or tensor of region ids.

    Returns:
      Float image tensor with 1.0 where the region's faces rasterise.
    """
    flame = self.tracker.flame
    n_faces = flame.faces.size(0)
    if isinstance(region_idx, int):
      region_idx = torch.tensor([region_idx], device=vtxs.device, dtype=torch.long)
    region_idx = region_idx.to(vtxs.device)
    in_region = torch.isin(flame.mask.fid2cid[:n_faces, ...], region_idx)
    region_faces = flame.faces[in_region]
    # Keep only faces that touch at least one bound vertex.
    region_faces = region_faces[(region_faces < self.gaussian.binding_num).any(dim=-1)]
    _, mask_img = self.nvdrast_normal(vtxs, region_faces.int())
    return (mask_img > 0).float()
            
  def load(self, pth):
    """Load a checkpoint, restoring variable-sized Gaussian tensors first.

    The Gaussian point tensors change size during densification, so they are
    popped out of the state dict and rebuilt as fresh Parameters before the
    remaining (fixed-shape) state is loaded non-strictly.
    """
    with open(pth, "rb") as f:
      sd = torch.load(f, map_location="cpu", weights_only=True)
    gaussian = self.gaussian
    # binding is an index tensor and must stay frozen.
    gaussian.binding = nn.Parameter(sd.pop("gaussian.binding").to(self.device), requires_grad=False)
    for key, attr in (
        ("gaussian._xyz", "_xyz"),
        ("gaussian._features_dc", "_features_dc"),
        ("gaussian._features_rest", "_features_rest"),
        ("gaussian._opacity", "_opacity"),
        ("gaussian._scaling", "_scaling"),
        ("gaussian._rotation", "_rotation"),
    ):
      setattr(gaussian, attr, nn.Parameter(sd.pop(key).to(self.device), requires_grad=True))
    self.load_state_dict(sd, strict=False)
      
  def load_auto(self, *args, **kwargs):
    """Resume via the base loader, then replay the camera-scale schedule.

    Every scheduled scale whose epoch has already passed is re-applied in
    ascending epoch order, so the latest applicable entry wins.
    """
    super().load_auto(*args, **kwargs)
    for epoch_key in sorted(self.scheduler_camera_scaling.keys()):
      if epoch_key > self._epoch:
        continue
      scale = self.scheduler_camera_scaling[epoch_key]
      self.gaussian.camera_set_scale(scale)
      self.deformer.set_scale(scale)
    return self
      