# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# Copyright (C) 2024-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
#
# --------------------------------------------------------
# Implementation of DUSt3R training losses
# --------------------------------------------------------
from copy import copy, deepcopy
import sys

import omegaconf
import roma
import torch
import torch.nn as nn
import torch.nn.functional as F

from math import ceil, floor

from fast3r.eval.recon_metric import calculate_corresponding_points_error_torch
from fast3r.dust3r.heads.camera import transform_gt_poses_to_pr_world
from fast3r.dust3r.heads.camera import get_new_world_origin2, pose_encoding_to_camera, get_new_world_origin, get_center_world
from fast3r.eval.utils import align_local_pts3d_to_global_with_cam, get_local_pts3d_from_depth_views
from fast3r.dust3r.inference import find_opt_scaling, get_pred_pts3d
from fast3r.dust3r.utils.geometry import (
    geotrf,
    get_joint_pointcloud_center_scale,
    get_joint_pointcloud_depth,
    inv,
    normalize_pointcloud,
)
from fast3r.dust3r.loss.vggt_loss import reg_loss2, check_and_fix_inf_nan,norm_factor_depth
from fast3r.dust3r.loss.vggt_loss import normalize_pointcloud as normalize_pointcloud_vggt
# from fast3r.dust3r.loss.vggt_loss import normalize_pointcloud_by_max as normalize_pointcloud_vggt

def Sum(*losses_and_masks):
    """Combine per-view loss tuples into a single result.

    Each argument is either ``(loss, mask)`` or ``(loss, mask, loss_type)``.
    If the first loss is per-pixel (ndim > 0), the tuples are returned
    unchanged so the caller can do its own per-pixel reduction; otherwise
    the scalar losses are summed into one global loss.
    """
    # Only the loss component is needed; indexing (rather than tuple
    # unpacking) accepts both 2- and 3-element entries.  The previous
    # `for loss2, mask2 in ...` loop raised ValueError on 3-tuples.
    loss = losses_and_masks[0][0]

    if loss.ndim > 0:
        # we are actually returning the loss for every pixel
        return losses_and_masks

    # we are returning the global loss: sum the remaining scalar losses
    for entry in losses_and_masks[1:]:
        loss = loss + entry[0]
    return loss

class LLoss(nn.Module):
    """Base class for L-norm losses over batches of low-dimensional points.

    Subclasses implement `distance(a, b)` returning one value per point;
    `forward` then applies the configured reduction.
    """

    def __init__(self, reduction="mean"):
        super().__init__()
        self.reduction = reduction

    def forward(self, a, b):
        # Inputs must be same-shaped point tensors with 1..3 coordinates
        assert (
            a.shape == b.shape and a.ndim >= 2 and 1 <= a.shape[-1] <= 3
        ), f"Bad shape = {a.shape}"
        per_point = self.distance(a, b)
        assert per_point.ndim == a.ndim - 1  # one dimension less
        mode = self.reduction
        if mode == "none":
            return per_point
        if mode == "sum":
            return per_point.sum()
        if mode == "mean":
            # guard against empty selections (e.g. fully-masked batch)
            if per_point.numel() > 0:
                return per_point.mean()
            return per_point.new_zeros(())
        raise ValueError(f"bad {self.reduction=} mode")

    def distance(self, a, b):
        raise NotImplementedError()

class L21Loss(LLoss):
    """Euclidean distance between 3d points"""

    def distance(self, a, b):
        # L2 norm of the per-point difference vectors
        return (a - b).norm(dim=-1)

class SmoothL1Loss(LLoss):
    """Huber (Smooth-L1) distance between 3D points."""

    def __init__(self, beta: float = 1.0, reduction: str = 'mean'):
        """
        beta: transition point between L2 and L1 (default 1.0)
        reduction: 'none'|'mean'|'sum'
        """
        super().__init__(reduction)
        self.beta = beta

    def distance(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """
        a, b: (..., 3) tensors of 3D points
        returns per-point loss of shape a.shape[:-1]
        """
        # elementwise Huber on every coordinate, then collapse the
        # coordinate axis to get a single value per 3D point
        elementwise = F.smooth_l1_loss(a, b, beta=self.beta, reduction='none')
        return elementwise.sum(dim=-1)

# L21 = L21Loss()
def combine_losses(losses):
    """Build a single loss from a config-declared list of weights and losses.

    A bare nn.Module is returned unchanged.  For an omegaconf ListConfig,
    numeric entries set the weight applied to the loss entries that follow
    (the weight persists until the next numeric entry), and the weighted
    losses are chained with `+`.  Any other input yields None.
    """
    if isinstance(losses, nn.Module):
        return losses

    if not isinstance(losses, omegaconf.listconfig.ListConfig):
        return None

    combined = None
    weight = 1
    for item in losses:
        if isinstance(item, (int, float)):
            weight = item
            continue
        term = weight * item
        combined = term if combined is None else combined + term
    return combined

class Criterion(nn.Module):
    """Wraps a pixel-level LLoss criterion used by the regression losses."""

    def __init__(self, criterion=None):
        super().__init__()
        # Fail fast on a mis-configured criterion.  The previous message
        # concatenated `bb()` -- an undefined name -- which turned an
        # assertion failure into a confusing NameError.
        assert isinstance(criterion, LLoss), (
            f"{criterion} is not a proper criterion!"
        )
        self.criterion = copy(criterion)

    def get_name(self):
        return f"{type(self).__name__}({self.criterion})"

    def with_reduction(self, mode="none"):
        """Return a deep copy whose entire loss chain uses `mode` reduction."""
        res = loss = deepcopy(self)
        while loss is not None:
            assert isinstance(loss, Criterion)
            loss.criterion.reduction = mode  # make it return the loss for each sample
            loss = loss._loss2  # we assume loss is a Multiloss
        return res

class MultiLoss(nn.Module):
    # class-level constants shared by all instances
    # (fuv_scaler presumably scales focal/uv encodings elsewhere -- TODO confirm)
    fuv_scaler = 256
    first_batch = False
    """Easily combinable losses (also keep track of individual loss values):
        loss = MyLoss1() + 0.1*MyLoss2()
    Usage:
        Inherit from this class and override get_name() and compute_loss()
    """

    def __init__(self):
        super().__init__()
        self._alpha = 1  # weight applied to this loss term
        self._loss2 = None  # next loss in the chained sum (linked list), or None

    def compute_loss(self, *args, **kwargs):
        raise NotImplementedError()

    def get_name(self):
        raise NotImplementedError()

    def __mul__(self, alpha):
        # Scale the loss: return a shallow copy carrying the new weight.
        assert isinstance(alpha, (int, float))
        res = copy(self)
        res._alpha = alpha
        return res

    __rmul__ = __mul__  # same

    def __add__(self, loss2):
        # Chain losses: walk to the end of the _loss2 linked list and append.
        # NOTE(review): copy(self) is shallow, so when self already has a
        # chain, the append writes onto a _loss2 node shared with the
        # original -- adding to an already-summed loss mutates it. Confirm
        # this aliasing is acceptable for the call sites used here.
        # assert isinstance(loss2, MultiLoss)
        res = cur = copy(self)
        # find the end of the chain
        while cur._loss2 is not None:
            cur = cur._loss2
        cur._loss2 = loss2
        return res

    def __repr__(self):
        name = self.get_name()
        if self._alpha != 1:
            name = f"{self._alpha:g}*{name}"
        if self._loss2:
            name = f"{name} + {self._loss2}"
        return name

    def forward(self, *args, **kwargs):
        # compute_loss may return (value, details), a scalar tensor
        # (auto-named detail entry), or a per-pixel tensor (no details).
        loss = self.compute_loss(*args, **kwargs)
        if isinstance(loss, tuple):
            loss, details = loss
        elif loss.ndim == 0:
            details = {self.get_name(): float(loss)}
        else:
            details = {}
        loss = loss * self._alpha

        if self._loss2:
            loss2 = self._loss2(*args, **kwargs)
            if isinstance(loss2, tuple):
                loss2, details2 = loss2
            # NOTE(review): this branch inspects `loss.ndim` (the FIRST
            # loss), not `loss2.ndim`, and `loss = loss + loss2` only runs
            # in the final else branch -- so a tuple/scalar `loss2`
            # contributes details but is never added to the returned loss.
            # Confirm this asymmetry is intended.
            elif loss.ndim == 0:
                details2 = {self._loss2.get_name(): float(loss2)}
            else:
                details2 = {}
                loss = loss + loss2
            details |= details2

        return loss, details

class Regr3DMultiviewV3(Criterion, MultiLoss):
    """Ensure that all 3D points are correct for multiple views.
    The point clouds from all views are concatenated together for normalization,
    but loss is calculated separately for each view.
    This version supports an additional local head for local coordinate systems.
    """

    def __init__(self, criterion, norm_mode="avg_dis", gt_scale=False, result_keys=None, normalize=True, relative=False, dist_clip=None, global_from_local=True, local_from_depth=True, local_weight=1.0, rel_key='pred_world_local'):
        """Store configuration for the multi-view 3D regression loss."""
        super().__init__(criterion)
        # normalization configuration
        self.norm_mode = norm_mode
        self.gt_scale = gt_scale
        self.normalize = normalize
        # cached normalization factors (filled during loss computation)
        self.norm_factor_pr = None
        self.norm_factor_gt = None
        self.norm_depth_factor_pr = None
        self.norm_depth_factor_gt = None
        # relative / world-alignment configuration
        self.relative = relative
        self.dist_clip = dist_clip
        self.rel_key = rel_key
        # local-head configuration
        self.global_from_local = global_from_local
        self.local_from_depth = local_from_depth
        self.local_weight = local_weight
        # keys under which per-view predictions are stored
        if result_keys is not None:
            self.result_keys = result_keys
        else:
            self.result_keys = {'local': "pts3d_in_self_view", 'global': "pts3d_in_other_view", 'depth': 'depth', 'conf_depth': 'conf_depth', 'fuv': 'camera_intrinsics'}


    def get_pts3d_from_local_views(self, gt_views, pred_views, dist_clip=None, pose_key='camera_pose', pred_key=None, result_key='pts3d_in_self_view_aligned_to_global'):
        """Get point clouds and valid masks for multiple views.

        Aligns the per-view local predictions to the global frame (stored
        in-place under `result_key` in each pred view), then collects the
        GT points, aligned predictions, and validity masks view by view.
        """
        if pred_key is None:
            pred_key = self.result_keys["local"]

        # GT views are only handed to the alignment helper during training
        gt_for_align = gt_views if self.training else None
        align_local_pts3d_to_global_with_cam(pred_views, pose_key=pose_key, pred_key=pred_key, result_key=result_key, gt_views=gt_for_align)

        gt_pts_list, pr_pts_list, valid_mask_list = [], [], []
        for gt_view, pred_view in zip(gt_views, pred_views):
            gt_pts = gt_view["pts3d"]
            valid_gt = gt_view["valid_mask"].clone()

            # optionally discard GT points farther than dist_clip from origin
            if dist_clip is not None:
                valid_gt &= gt_pts.norm(dim=-1) <= dist_clip

            gt_pts_list.append(gt_pts)
            pr_pts_list.append(pred_view[result_key])
            valid_mask_list.append(valid_gt)

        return gt_pts_list, pr_pts_list, valid_mask_list

    def get_transform(self, pred_views, gt_views, compute_scaling_flag=False, align_pose=True):
        """
        Computes the transformation from ground truth to prediction space.
        Optionally, it transforms the 3D points and camera poses of the ground truth views
        to align with the prediction world space.

        Args:
            pred_views (list): A list of dictionaries, each containing batched prediction data for a view.
            gt_views (list): A list of dictionaries, each containing batched ground truth data for a view.
                            Expected to contain 'pts3d' and optionally 'camera_pose'.
            compute_scaling_flag (bool): If True, computes scaling in the transformation. Not fully implemented.
            align_pose (bool): If True, aligns the 'camera_pose' from gt_views to the prediction world.

        Returns:
            tuple: A tuple containing:
                - gt_transformed (list): A list of tensors for the transformed gt 3D points.
                - all_R (torch.Tensor): The batched rotation matrices (B, 3, 3).
                - all_t (torch.Tensor): The batched translation vectors (B, 1, 3).
                - all_s (None): Placeholder for the scale factor.
        """
        # Stack per-view tensors into (B, S, H, W, 3)
        pred_pts3d = torch.stack([pred[self.result_keys['global']]for pred in pred_views ], dim=1)
        gt_pts = torch.stack([gt["pts3d"] for gt in gt_views], dim=1)
        device = gt_pts.device
        dtype = gt_pts.dtype
        # print(gt_pts.shape)
        B, S, H, W, C = gt_pts.shape
        
        # Lists to store results for each sample
        rotations_list = []
        translations_list = []
        scales_list = [] # If computing scaling

        for i in range(B):
            # Get the i-th sample
            # NOTE(review): every pixel of every view enters the rigid fit;
            # invalid/masked points are NOT excluded -- confirm intended.
            pts1_reshaped = gt_pts[i].reshape(-1, 3) # Shape: (S * H * W, 3)
            pts2_reshaped = pred_pts3d[i].reshape(-1, 3) # Shape: (S * H * W, 3)

            # This finds the transform from the GT world to the PR world (T_gt_to_pr)
            R, t = roma.rigid_points_registration(
                pts1_reshaped,
                pts2_reshaped,
                compute_scaling=compute_scaling_flag
            )
            rotations_list.append(R)
            translations_list.append(t)

        # Stack the results into batched tensors
        all_R = torch.stack(rotations_list, dim=0)     # Shape: (B, 3, 3)
        all_t = torch.stack(translations_list, dim=0).reshape(B, 1, 3)     # Shape: (B, 1, 3)
        
        # IN-PLACE TRANSFORMATION OF GT POSES
        # NOTE(review): GT poses are rewritten in-place here AND rewritten
        # again in the align_pose block below -- confirm this double update
        # is intended rather than a leftover.
        gt_poses_in_pr_world = transform_gt_poses_to_pr_world(all_R, all_t, [view['camera_pose'] for view in gt_views])
        for i, view in enumerate(gt_views):
            view['camera_pose'] = gt_poses_in_pr_world[i]

        # Align GT points for loss calculation
        # Row-vector convention: p' = p @ R^T + t
        p1_reshaped = gt_pts.view(B, -1, 3)
        rotated_points = torch.matmul(p1_reshaped, all_R.transpose(-2, -1))
        p1_transformed_flat = rotated_points + all_t 
        gt_transformed = p1_transformed_flat.view(*gt_pts.shape)
        # print(pred_transformed.shape)[8, 4, 238, 238, 3]
        gt_transformed = [gt_transformed[:, i] for i in range(S)]
        # print(pred_transformed[0].shape)

        # 2. Align the ground truth camera poses to the prediction world space
        if align_pose and "camera_pose" in gt_views[0]:
            # Stack poses from all views into a single tensor of shape (B, S, 4, 4)
            gt_poses = torch.stack([gt["camera_pose"] for gt in gt_views], dim=1)

            # Construct the 4x4 alignment matrix T_align for the batch (B, 4, 4)
            # This matrix transforms points from gt-world to pred-world
            # NOTE(review): T_align is built in row-vector layout (R^T in the
            # upper-left block, t in the bottom ROW) and right-multiplied
            # onto 4x4 poses -- verify this matches the pose convention used
            # by the rest of the pipeline.
            T_align = torch.eye(4, device=device, dtype=dtype).unsqueeze(0).repeat(B, 1, 1)
            T_align[:, :3, :3] = all_R.transpose(-2, -1) # Set rotation part
            T_align[:, 3, :3] = all_t.squeeze(1)         # Set translation part

            # A camera pose is a transformation from camera space to world space (P_cam_to_world).
            # To get the new pose, we chain the transformations: P_new = P_old @ T_align
            # P_new transforms camera space -> gt_world -> pred_world
            aligned_poses = torch.matmul(gt_poses, T_align.unsqueeze(1))

            # Update the gt_views dictionaries in-place with the aligned poses
            for s_idx in range(S):
                gt_views[s_idx]["camera_pose"] = aligned_poses[:, s_idx]

        return gt_transformed, all_R, all_t, None #all_s

    def get_transform_pr(self, pred_views, gt_views, compute_scaling_flag = False):
        """Rigidly align the *predictions* onto the GT world.

        Per batch sample, fits (R, t) mapping predicted points onto GT
        points (T_pr_to_gt), rewrites the GT camera poses in-place using the
        inverse transform (T_gt_to_pr), and returns the predicted points
        expressed in the GT world, split per view.

        Returns:
            tuple: (aligned_pred_pts_per_view, all_R, all_t, None)
        """
        # Stack per-view tensors into (B, S, H, W, 3)
        pred_pts3d = torch.stack([pred[self.result_keys['global']]for pred in pred_views ], dim=1)
        gt_pts = torch.stack([gt["pts3d"] for gt in gt_views], dim=1)
        # print(gt_pts.shape)
        B, S, H, W, C = gt_pts.shape
        
        # Lists to store results for each sample
        rotations_list = []
        translations_list = []
        scales_list = [] # If computing scaling

        for i in range(B):
            # Get the i-th sample
            # NOTE(review): all pixels (including invalid ones) enter the fit
            pts2_reshaped = gt_pts[i].reshape(-1, 3) # Shape: (S * H * W, 3)
            pts1_reshaped = pred_pts3d[i].reshape(-1, 3) # Shape: (S * H * W, 3)

            # This finds the transform from the PR world to the GT world (T_pr_to_gt)
            R, t = roma.rigid_points_registration(
                pts1_reshaped,
                pts2_reshaped,
                compute_scaling=compute_scaling_flag
            )
            rotations_list.append(R)
            translations_list.append(t)

        # Stack the results into batched tensors
        all_R = torch.stack(rotations_list, dim=0)     # Shape: (B, 3, 3)
        all_t = torch.stack(translations_list, dim=0).reshape(B, 1, 3)     # Shape: (B, 1, 3)
        
        # To transform GT poses to PR world, we need the inverse transform: T_gt_to_pr
        R_inv = all_R.transpose(-2, -1)
        # t_inv = -R_inv @ all_t, handling batch dimensions
        t_inv = -torch.bmm(R_inv, all_t.permute(0, 2, 1)).permute(0, 2, 1)

        # IN-PLACE TRANSFORMATION OF GT POSES using the correct inverse transform
        gt_poses_in_pr_world = transform_gt_poses_to_pr_world(R_inv, t_inv, [view['camera_pose'] for view in gt_views])
        for i, view in enumerate(gt_views):
            view['camera_pose'] = gt_poses_in_pr_world[i]

        # The rest of the function uses the original T_pr_to_gt to align predicted points
        # NOTE(review): despite its name, `gt_transformed` below holds the
        # *predicted* points mapped into the GT world (row-vector form:
        # p' = p @ R^T + t)
        p1_reshaped = pred_pts3d.view(B, -1, 3)
        rotated_points = torch.matmul(p1_reshaped, all_R.transpose(-2, -1))
        p1_transformed_flat = rotated_points + all_t 
        gt_transformed = p1_transformed_flat.view(*gt_pts.shape)
        gt_transformed = [gt_transformed[:, i] for i in range(gt_transformed.shape[1])]

        return gt_transformed, all_R, all_t, None #all_s
    
    def get_pts3d_from_views(self, gt_views, pred_views, dist_clip=None, local=False, pose_key="camera_pose", rel_key='registration'):
        """Get point clouds and valid masks for multiple views.

        When `self.relative` (and not `local`), `rel_key` selects how GT
        and/or predicted points are brought into a common frame before
        supervision.  Otherwise, GT points are rotated into the anchor
        (first) view for the global loss, or into each view's own camera
        for the local loss.  An unrecognized `rel_key` falls through to the
        default (anchor/local) path, matching the original behavior.

        Returns:
            (gt_pts_list, pr_pts_list, valid_mask_list): one entry per view.
        """
        gt_pts_list = []
        pr_pts_list = []
        valid_mask_list = []

        if self.relative and not local:
            if rel_key == 'registration_pr':
                # Rigidly align the *predictions* onto the GT world;
                # pr_pts_list comes back already transformed (gradients flow
                # through the registration here, unlike 'registration').
                pr_pts_list, all_R, all_t, all_s = self.get_transform_pr(pred_views, gt_views)

                for gt_view, pred_view in zip(gt_views, pred_views):
                    gt_pts = gt_view["pts3d"]
                    valid_gt = gt_view["valid_mask"].clone()

                    # (fix) this filtering/appending used to sit outside the
                    # per-view loop, so gt_pts_list and valid_mask_list came
                    # back empty whenever dist_clip was None
                    if dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_gt &= dis <= dist_clip

                    gt_pts_list.append(gt_pts)
                    valid_mask_list.append(valid_gt)

                return gt_pts_list, pr_pts_list, valid_mask_list

            if rel_key == 'registration':
                # Rigidly align the *GT* points onto the prediction world;
                # no gradients flow through the registration.
                with torch.no_grad():
                    gt_pts_list, all_R, all_t, all_s = self.get_transform(pred_views, gt_views)
                for (gt_view, pred_view), gt_pts in zip(zip(gt_views, pred_views), gt_pts_list):
                    pr_pts = pred_view.get(self.result_keys['global'])
                    valid_gt = gt_view["valid_mask"].clone()

                    if dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_gt &= dis <= dist_clip

                    pr_pts_list.append(pr_pts)
                    valid_mask_list.append(valid_gt)

                return gt_pts_list, pr_pts_list, valid_mask_list

            if rel_key == 'center_world':
                # Re-center the world frame and compare both in that frame.
                T_new_to_old, T_old_to_new, gt_poses_new = get_center_world(pred_views, gt_views, pose_key=pose_key)
                for gt_view, pred_view in zip(gt_views, pred_views):
                    gt_pts = geotrf(T_old_to_new, gt_view["pts3d"])
                    pr_pts = pred_view.get(self.result_keys['global'])
                    valid_gt = gt_view["valid_mask"].clone()

                    if dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_gt &= dis <= dist_clip

                    gt_pts_list.append(gt_pts)
                    pr_pts_list.append(pr_pts)
                    valid_mask_list.append(valid_gt)

                return gt_pts_list, pr_pts_list, valid_mask_list

            if rel_key == 'pred_world_local':
                # Bring global predictions into each view's own camera frame
                # (via the predicted pose) and supervise against GT
                # camera-frame points.
                for gt_view, pred_view in zip(gt_views, pred_views):
                    gt_pts = gt_view["pts3d_cam"]
                    inv_matrix = inv(pose_encoding_to_camera(pred_view[pose_key]))
                    pr_pts = geotrf(inv_matrix, pred_view.get(self.result_keys['global']))
                    valid_gt = gt_view["valid_mask"].clone()

                    if dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_gt &= dis <= dist_clip

                    gt_pts_list.append(gt_pts)
                    pr_pts_list.append(pr_pts)
                    valid_mask_list.append(valid_gt)

                return gt_pts_list, pr_pts_list, valid_mask_list

            if rel_key == 'pred_world':
                # Move GT points (and GT poses, in-place) into the predicted
                # world frame.
                T_new_to_old, T_old_to_new = get_new_world_origin2(pred_views, gt_views, pose_key=pose_key)
                for gt_view, pred_view in zip(gt_views, pred_views):
                    gt_pts = geotrf(T_old_to_new, gt_view["pts3d"])
                    pr_pts = pred_view.get(self.result_keys['global'])
                    valid_gt = gt_view["valid_mask"].clone()

                    gt_view["camera_pose"] = geotrf(T_old_to_new, gt_view["camera_pose"])

                    if dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_gt &= dis <= dist_clip

                    gt_pts_list.append(gt_pts)
                    pr_pts_list.append(pr_pts)
                    valid_mask_list.append(valid_gt)

                return gt_pts_list, pr_pts_list, valid_mask_list

            if rel_key == 'get_global_pose':
                # Predictions mapped via the predicted world->view1 pose;
                # GT mapped into the GT anchor camera.
                pose_world2_to_world1, w2view1_pose = get_new_world_origin(pred_views, pose_key=pose_key)
                inv_matrix_anchor = inv(gt_views[0]["camera_pose"].float())
                for gt_view, pred_view in zip(gt_views, pred_views):
                    gt_pts = geotrf(inv_matrix_anchor, gt_view["pts3d"])
                    pr_pts = geotrf(w2view1_pose, pred_view.get(self.result_keys['global']))
                    valid_gt = gt_view["valid_mask"].clone()

                    if dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_gt &= dis <= dist_clip

                    gt_pts_list.append(gt_pts)
                    pr_pts_list.append(pr_pts)
                    valid_mask_list.append(valid_gt)

                return gt_pts_list, pr_pts_list, valid_mask_list

        # Default (non-relative) path.
        if not local:  # compute the inverse transformation for the anchor view (first view)
            inv_matrix_anchor = inv(gt_views[0]["camera_pose"].float())

        for gt_view, pred_view in zip(gt_views, pred_views):
            if local:
                # Rotate GT points into this view's own camera frame
                inv_matrix_local = inv(gt_view["camera_pose"].float())
                gt_pts = geotrf(inv_matrix_local, gt_view["pts3d"])
                pr_pts = pred_view.get(self.result_keys['local'])
            else:
                # Rotate GT points into the anchor (first) view's frame
                gt_pts = geotrf(inv_matrix_anchor, gt_view["pts3d"])
                pr_pts = pred_view.get(self.result_keys['global'])
            valid_gt = gt_view["valid_mask"].clone()

            if dist_clip is not None:
                dis = gt_pts.norm(dim=-1)
                valid_gt &= dis <= dist_clip

            gt_pts_list.append(gt_pts)
            pr_pts_list.append(pr_pts)
            valid_mask_list.append(valid_gt)

        return gt_pts_list, pr_pts_list, valid_mask_list

    def normalize_pointcloud_from_views(self, pts_list, norm_mode="avg_dis", valid_list=None):
        """Normalize point clouds from multiple views with one shared factor.

        All views are concatenated; the normalization factor is computed
        from valid points only (when `valid_list` is given) and then applied
        to every view.

        Args:
            pts_list: list of (..., 3) point tensors, one per view.
            norm_mode: "<norm>_<dis>" string, e.g. "avg_dis", "median_log1p".
            valid_list: optional list of boolean masks matching pts_list.

        Returns:
            (normalized_pts_list, detached_norm_factor)
        """
        assert all(pts.ndim >= 3 and pts.shape[-1] == 3 for pts in pts_list)

        norm_mode, dis_mode = norm_mode.split("_")

        # Concatenate all point clouds (and masks) along the view axis
        all_pts = torch.cat(pts_list, dim=1)
        if valid_list is not None:
            all_valid = torch.cat(valid_list, dim=1)
            valid_pts = all_pts[all_valid]  # only valid points drive the factor
        else:
            valid_pts = all_pts

        # Distance of each (valid) point to the origin
        dis = valid_pts.norm(dim=-1)

        # Apply distance transformation based on dis_mode
        if dis_mode == "dis":
            pass  # raw distances
        elif dis_mode == "log1p":
            dis = torch.log1p(dis)
        elif dis_mode == "warp-log1p":
            # NOTE(review): the warp modifies `all_pts`, but the output is
            # built from `pts_list` below, so the warped coordinates are
            # never returned; `.view(-1, 1)` also assumes `dis` matches
            # all_pts' layout, which fails once a mask filtered points --
            # confirm whether this branch is actually used.
            log_dis = torch.log1p(dis)
            warp_factor = log_dis / dis.clip(min=1e-8)
            all_pts = all_pts * warp_factor.view(-1, 1)
            dis = log_dis
        else:
            raise ValueError(f"Unsupported distance mode: {dis_mode}")

        # Apply different normalization modes
        if norm_mode == "avg":
            norm_factor = dis.mean()
        elif norm_mode == "median":
            norm_factor = dis.median()
        else:
            raise ValueError(f"Unsupported normalization mode: {norm_mode}")

        norm_factor = norm_factor.clip(min=1e-8)  # prevent division by zero

        # Normalize every view.  (fix) the previous code zipped with
        # `valid_list` unconditionally, raising TypeError when it was None
        # despite None being the documented default.
        if valid_list is not None:
            normalized_pts = [torch.where(valid.unsqueeze(-1), pts / norm_factor, pts)
                              for pts, valid in zip(pts_list, valid_list)]
        else:
            normalized_pts = [pts / norm_factor for pts in pts_list]

        return normalized_pts, norm_factor.detach()

    def normalize_pointcloud_per_view(self, pts_list, norm_mode="avg_dis", valid_list=None):
        """Normalize point clouds on a per-view basis.

        Each view gets its own normalization factor, computed from its valid
        points only (when a mask is provided for that view).

        Args:
            pts_list: list of (..., 3) point tensors, one per view.
            norm_mode: "<norm>_<dis>" string, e.g. "avg_dis", "median_log1p".
            valid_list: list of boolean masks; individual entries may be None.

        Returns:
            list of normalized point tensors, same shapes as pts_list.
        """
        norm_mode, dis_mode = norm_mode.split("_")

        normed_pts_list = []
        for pts, valid in zip(pts_list, valid_list):
            valid_pts = pts[valid] if valid is not None else pts

            dis = valid_pts.norm(dim=-1)

            # Apply distance transformation based on dis_mode
            if dis_mode == "dis":
                pass  # raw distances
            elif dis_mode == "log1p":
                dis = torch.log1p(dis)
            elif dis_mode == "warp-log1p":
                # NOTE(review): `.view(-1, 1)` assumes `dis` lines up with
                # `pts`' layout, which breaks once a mask filtered points --
                # confirm this branch is exercised with masks.
                log_dis = torch.log1p(dis)
                warp_factor = log_dis / dis.clip(min=1e-8)
                pts = pts * warp_factor.view(-1, 1)
                dis = log_dis
            else:
                raise ValueError(f"Unsupported distance mode: {dis_mode}")

            if norm_mode == "avg":
                norm_factor = dis.mean()  # per-view normalization
            elif norm_mode == "median":
                norm_factor = dis.median()
            else:
                raise ValueError(f"Unsupported normalization mode: {norm_mode}")

            norm_factor = norm_factor.clip(min=1e-8)  # avoid division by zero

            # (fix) `valid.unsqueeze(-1)` crashed when valid was None, even
            # though None was explicitly handled when computing the factor
            if valid is not None:
                normed_pts_list.append(torch.where(valid.unsqueeze(-1), pts / norm_factor, pts))
            else:
                normed_pts_list.append(pts / norm_factor)

        return normed_pts_list

    def normalize_pointcloud(self, pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=None):
        """Normalize GT and predicted point lists with per-sample factors.

        The GT points define the normalization factor.  Metric samples reuse
        the GT factor for the predictions; non-metric samples (selected by
        `not_metric_mask`) get their own factor computed from the
        predictions themselves.
        """
        stacked_masks = torch.stack(valid_mask_list, dim=1)
        stacked_gt = torch.stack(gt_pts_list, dim=1)

        # normalize GT with its own per-sample factor
        stacked_gt, norm_factor_gt = normalize_pointcloud_vggt(stacked_gt, stacked_masks)
        gt_pts_list = [stacked_gt[:, view_idx] for view_idx in range(stacked_gt.shape[1])]

        # default: predictions share the GT factor (metric samples)
        norm_factor_pr = norm_factor_gt.clone()

        if not_metric_mask is not None and not_metric_mask.sum() > 0:
            # non-metric samples: derive their factor from the predictions
            stacked_pr = torch.stack(pr_pts_list, dim=1)
            norm_factor_pr[not_metric_mask] = normalize_pointcloud_vggt(stacked_pr[not_metric_mask], stacked_masks[not_metric_mask], return_pts=False)

        pr_pts_list = [pr_pts / norm_factor_pr.view(-1, 1, 1, 1) for pr_pts in pr_pts_list]

        return pr_pts_list, gt_pts_list, norm_factor_pr, norm_factor_gt
    
    def compute_loss(self, gts, preds, **kw):
        total_loss = []
        details = {}
        self_name = "Regr3DMultiviewV3"
        not_metric_mask=~gts[0]["is_metric"]

        if self.norm_mode == 'depth' and (self.result_keys['depth'] in preds[0] or self.result_keys['local'] in preds[0]):
            self.norm_depth_factor_gt, self.norm_depth_factor_pr = norm_factor_depth(gts, preds, depth_key=self.result_keys['depth'])

        if self.result_keys['global'] in preds[0]:
            rel_key = self.rel_key
            gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_views(gts, preds, rel_key=rel_key, **kw)
            
            if gt_pts_list is not None:
                if not self.gt_scale:
                    pr_pts_list, gt_pts_list, norm_factor_pr, norm_factor_gt = self.normalize_pointcloud(pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=not_metric_mask)                    
                    if not self.relative:
                        self.norm_factor_pr = norm_factor_pr
                        self.norm_factor_gt = norm_factor_gt

                # Compute loss for each view in global coordinate system
                for i, (gt_pts, pr_pts, valid_mask) in enumerate(zip(gt_pts_list, pr_pts_list, valid_mask_list)):
                    if self.dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_mask &= dis <= self.dist_clip
                    loss = self.criterion(pr_pts[valid_mask], gt_pts[valid_mask])
                    total_loss.append((loss, valid_mask, "wglobal"))
                    # details[self_name + f"_pts3d_loss_global/{i:02d}"] = float(loss.mean())
                     
            if rel_key == 'pred_world_local' and self.relative:
                gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_views(gts, preds, rel_key='pred_world', **kw)
                if not self.gt_scale:
                    pr_pts_list, gt_pts_list, norm_factor_pr, norm_factor_gt = self.normalize_pointcloud(pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=not_metric_mask)

                for i, (gt_pts, pr_pts, valid_mask) in enumerate(zip(gt_pts_list, pr_pts_list, valid_mask_list)):
                    if self.dist_clip is not None:
                        dis = gt_pts.norm(dim=-1)
                        valid_mask &= dis <= self.dist_clip
                    loss = self.criterion(pr_pts[valid_mask], gt_pts[valid_mask])
                    total_loss.append((loss, valid_mask, "global"))
        
        # Check if local loss is needed (i.e., `pts3d_in_self_view` and `conf_self` exist in preds)
        if self.result_keys['local'] in preds[0]:
            # Compute loss for pts3d_in_self_view (local loss)
            gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_views(gts, preds, local=True, **kw)

            if not self.gt_scale:
                if self.norm_mode == 'depth' and self.norm_depth_factor_gt is not None:
                    n_dims =gt_pts_list[0].ndim
                    norm_factor_gt2 = self.norm_depth_factor_gt.view([-1] + [1] * (n_dims - 1))
                    norm_factor_pr2 = self.norm_depth_factor_pr.view([-1] + [1] * (n_dims - 1))
                    gt_pts_list = [gt_prts_local/norm_factor_gt2 for gt_prts_local in gt_pts_list]
                    pr_pts_list = [pr_prts_local/norm_factor_pr2 for pr_prts_local in pr_pts_list]   
                else:                          
                    pr_pts_list, gt_pts_list, norm_factor_pr, norm_factor_gt = self.normalize_pointcloud(pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=not_metric_mask)
            
            # Compute loss for each view in its local coordinate system
            for i, (gt_pts, pr_pts, valid_mask) in enumerate(zip(gt_pts_list, pr_pts_list, valid_mask_list)):
                loss_local = self.criterion(pr_pts[valid_mask], gt_pts[valid_mask])
                total_loss.append((loss_local, valid_mask, "local"))
                details[self_name + f"_pts3d_loss_local/{i:02d}"] = float(loss_local.mean())

            if self.global_from_local:
            # local to global loss
                gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_local_views(gts, preds, **kw)
                if not self.gt_scale:
                    pr_pts_list, gt_pts_list, self.norm_factor_pr, self.norm_factor_gt = self.normalize_pointcloud(pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=not_metric_mask)

                for i, (gt_pts, pr_pts, valid_mask) in enumerate(zip(gt_pts_list, pr_pts_list, valid_mask_list)):
                    loss = self.criterion(pr_pts[valid_mask], gt_pts[valid_mask])
                    total_loss.append((loss, valid_mask, "global2"))
                    details[self_name + f"_pts3d_loss_global2/{i:02d}"] = float(loss.mean())

        if self.local_from_depth:
            # if self.training:
            #     arg = gts
            # else:
            arg=None
            get_local_pts3d_from_depth_views(pred_views=preds, result_key='local_from_depth', fuv_key=self.result_keys['fuv'], depth_key=self.result_keys['depth'], fuv_scaler=MultiLoss.fuv_scaler, gt_views=arg)

            gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_local_views(gt_views=gts, pred_views=preds, dist_clip=None, pose_key='camera_pose', pred_key='local_from_depth', result_key='depth_aligned_to_global')
            if not self.gt_scale:
                pr_pts_list, gt_pts_list, self.norm_factor_pr, self.norm_factor_gt = self.normalize_pointcloud(pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=not_metric_mask)

            for i, (gt_pts, pr_pts, valid_mask) in enumerate(zip(gt_pts_list, pr_pts_list, valid_mask_list)):
                loss = self.criterion(pr_pts[valid_mask], gt_pts[valid_mask])
                total_loss.append((loss, valid_mask, "dglobal"))
                details[self_name + f"_pts3d_loss_dglobal/{i:02d}"] = float(loss.mean())      
                
            gt_pts_list = []
            pr_pts_list = []
            valid_mask_list = []

            for gt_view, pred_view in zip(gts, preds):
                # if local:
                    # Rotate GT points to align with the local camera origin for supervision
                # inv_matrix_local = inv(gt_view["camera_pose"].float())
                # gt_pts = geotrf(inv_matrix_local, gt_view["pts3d"])  # Transform GT points to local view's origin
                gt_pts = gt_view["pts3d_cam"]
                pr_pts = pred_view.get('local_from_depth')  # Local predicted points
                # else:
                #     # Use the anchor view (first view) transformation for global loss
                #     gt_pts = geotrf(inv_matrix_anchor, gt_view["pts3d"])  # Transform GT points to anchor view
                #     pr_pts = pred_view.get(self.result_keys['global'])  # Predicted points in anchor view
                valid_gt = gt_view["valid_mask"].clone()

                gt_pts_list.append(gt_pts)
                pr_pts_list.append(pr_pts)
                valid_mask_list.append(valid_gt)

            if not self.gt_scale:
                if self.norm_mode == 'depth' and self.norm_depth_factor_gt is not None:
                    n_dims =gt_pts_list[0].ndim
                    norm_factor_gt2 = self.norm_depth_factor_gt.view([-1] + [1] * (n_dims - 1))
                    norm_factor_pr2 = self.norm_depth_factor_pr.view([-1] + [1] * (n_dims - 1))
                    gt_pts_list = [gt_prts_local/norm_factor_gt2 for gt_prts_local in gt_pts_list]
                    pr_pts_list = [pr_prts_local/norm_factor_pr2 for pr_prts_local in pr_pts_list]   
                else:                          
                    pr_pts_list, gt_pts_list, norm_factor_pr, norm_factor_gt = self.normalize_pointcloud(pr_pts_list, gt_pts_list, valid_mask_list, not_metric_mask=not_metric_mask)

            # Compute loss for each view in its local coordinate system
            for i, (gt_pts, pr_pts, valid_mask) in enumerate(zip(gt_pts_list, pr_pts_list, valid_mask_list)):
                loss_local = self.criterion(pr_pts[valid_mask], gt_pts[valid_mask]) #* self.local_weight
                total_loss.append((loss_local, valid_mask, "dlocal"))
                details[self_name + f"_pts3d_loss_dlocal/{i:02d}"] = float(loss_local.mean())
        if gts[0]["is_metric"].sum() > 0:            
            details['gtnorm'] = norm_factor_gt[gts[0]["is_metric"]].mean()            
            # print(details['gtnorm'])
            # mask = norm_factor_gt.detach().cpu().numpy()>15
            # print([e for e, m in zip(gts[0]["label"], mask) if m])
        return Sum(*total_loss), details

class Eval3DMultiView(Regr3DMultiviewV3):
    """Evaluation counterpart of Regr3DMultiviewV3.

    Instead of training losses, it reports absolute / relative
    corresponding-point errors (via
    ``calculate_corresponding_points_error_torch``) for every prediction
    head that is present: global points, local points, global-from-local
    points, and depth-derived points.  The scalar returned as the "loss"
    is the sum of the mean *relative* errors over the enabled heads.
    """

    def __init__(self, **kargs):
        super().__init__(**kargs)

    def _mean_pts_errors(self, gt_pts_list, pr_pts_list, valid_mask_list, apply_dist_clip=False):
        """Mean absolute/relative corresponding-point errors over all views.

        Args:
            gt_pts_list: per-view ground-truth 3D points.
            pr_pts_list: per-view predicted 3D points.
            valid_mask_list: per-view boolean masks selecting supervised pixels.
            apply_dist_clip: if True and ``self.dist_clip`` is set, GT points
                farther than ``dist_clip`` from the origin are discarded.

        Returns:
            (mean_abs_error, mean_rel_error), each averaged over the views.
        """
        abs_errors, rel_errors = [], []
        for gt_pts, pr_pts, valid_mask in zip(gt_pts_list, pr_pts_list, valid_mask_list):
            if apply_dist_clip and self.dist_clip is not None:
                # non-mutating mask update (the original used in-place `&=`,
                # which also modified the caller's mask tensor)
                valid_mask = valid_mask & (gt_pts.norm(dim=-1) <= self.dist_clip)
            abs_err, rel_err = calculate_corresponding_points_error_torch(
                gt_pts[valid_mask], pr_pts[valid_mask])
            abs_errors.append(abs_err)
            rel_errors.append(rel_err)
        return torch.mean(torch.stack(abs_errors)), torch.mean(torch.stack(rel_errors))

    def compute_loss(self, gts, preds, **kw):
        """Return (sum of mean relative errors, dict of ``*_err`` / ``*_rel`` metrics)."""
        total_loss = 0
        details = {}

        # --- global points (predictions in the shared world frame) --------
        if self.result_keys['global'] in preds[0]:
            gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_views(
                gts, preds, rel_key='pred_world', **kw)
            if gt_pts_list is not None:
                abs_mean, rel_mean = self._mean_pts_errors(
                    gt_pts_list, pr_pts_list, valid_mask_list, apply_dist_clip=True)
                total_loss += rel_mean
                details["gpts_err"] = float(abs_mean)
                details["gpts_rel"] = float(rel_mean)

        # --- local points (predictions in each camera's own frame) --------
        if self.result_keys['local'] in preds[0]:
            gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_views(
                gts, preds, local=True, **kw)
            abs_mean, rel_mean = self._mean_pts_errors(gt_pts_list, pr_pts_list, valid_mask_list)
            total_loss += rel_mean
            details["lpts_err"] = float(abs_mean)
            details["lpts_rel"] = float(rel_mean)

            if self.global_from_local:
                # local predictions lifted into the global frame
                gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_local_views(gts, preds, **kw)
                abs_mean, rel_mean = self._mean_pts_errors(gt_pts_list, pr_pts_list, valid_mask_list)
                total_loss += rel_mean
                details["g2pts_err"] = float(abs_mean)
                details["g2pts_rel"] = float(rel_mean)

        # --- points reconstructed from predicted depth --------------------
        if self.local_from_depth:
            # writes pred_view['local_from_depth'] in-place for every view
            get_local_pts3d_from_depth_views(
                pred_views=preds, result_key='local_from_depth',
                fuv_key=self.result_keys['fuv'], depth_key=self.result_keys['depth'],
                fuv_scaler=MultiLoss.fuv_scaler)

            # depth points lifted into the global frame using camera poses
            gt_pts_list, pr_pts_list, valid_mask_list = self.get_pts3d_from_local_views(
                gt_views=gts, pred_views=preds, dist_clip=None, pose_key='camera_pose',
                pred_key='local_from_depth', result_key='depth_aligned_to_global')
            abs_mean, rel_mean = self._mean_pts_errors(gt_pts_list, pr_pts_list, valid_mask_list)
            total_loss += rel_mean
            details["dgpts_err"] = float(abs_mean)
            details["dgpts_rel"] = float(rel_mean)

            # depth points compared in each camera's own frame against GT
            # camera-frame points ("pts3d_cam")
            gt_pts_list = [gt_view["pts3d_cam"] for gt_view in gts]
            pr_pts_list = [pred_view.get('local_from_depth') for pred_view in preds]
            valid_mask_list = [gt_view["valid_mask"].clone() for gt_view in gts]
            abs_mean, rel_mean = self._mean_pts_errors(gt_pts_list, pr_pts_list, valid_mask_list)
            total_loss += rel_mean
            details["dlpts_err"] = float(abs_mean)
            details["dlpts_rel"] = float(rel_mean)

        return total_loss, details
class ConfLossMultiviewV2(MultiLoss):
    """Weighted regression by learned confidence for multiple views.

    Each per-pixel loss ``l`` is weighted as ``l*conf - alpha*log(conf)``
    using the prediction's confidence map.  The total is normalized by the
    number of global and local loss terms.
    """

    def __init__(self, pixel_loss, alpha=1):
        super().__init__()
        assert alpha > 0
        self.alpha = alpha
        # per-pixel (unreduced) losses are required for confidence weighting
        self.pixel_loss = pixel_loss.with_reduction("none")

    def get_name(self):
        return f"ConfLossMultiviewV2({self.pixel_loss})"

    def get_conf_log(self, x):
        # conf must be strictly positive since its log is taken
        return x, torch.log(x)

    def compute_loss(self, gts, preds, **kw):
        """Return (normalized confidence-weighted loss, details dict)."""
        # compute per-pixel loss for all views
        total_loss, details = self.pixel_loss(gts, preds, **kw)

        total_conf_loss = 0
        conf_details = {}
        self_name = type(self).__name__

        # Separate counters for global and local losses; each also indexes
        # the corresponding prediction in `preds`.
        global_count = 0
        local_count = 0

        for loss, mask, loss_type in total_loss:
            if loss_type == "global":
                conf, log_conf = self.get_conf_log(preds[global_count]["conf"][mask])
                view_idx = global_count
                global_count += 1
            elif loss_type == "local":
                conf, log_conf = self.get_conf_log(preds[local_count]["conf_self"][mask])
                view_idx = local_count
                local_count += 1
            else:
                # BUGFIX: previously an unrecognized loss_type (e.g. "wglobal",
                # "global2", "dglobal") re-accumulated the previous iteration's
                # conf_loss, or raised NameError on the first iteration.
                continue

            conf_loss = loss * conf - self.alpha * log_conf
            # nan protection in case of no valid pixels at all
            conf_loss = conf_loss.mean() if conf_loss.numel() > 0 else 0

            conf_details[self_name + f"_conf_loss_{loss_type}/{view_idx:02d}"] = float(conf_loss)
            total_conf_loss += conf_loss

        if local_count > 0:
            assert local_count == global_count, "Mismatch between the number of local and global losses."

        # Normalize total_conf_loss by the number of global and local losses.
        # BUGFIX: guard against division by zero when no terms were recognized.
        n_terms = global_count + local_count
        if n_terms > 0:
            total_conf_loss /= n_terms

        details.update(conf_details)
        return total_conf_loss, details

class ConfLossMultiviewPose(MultiLoss):
    """Confidence-weighted multi-view regression loss with optional pose,
    camera (FOV) and depth sub-losses.

    Per-pixel losses from ``pixel_loss`` are weighted by the matching
    predicted confidence map (``conf_loss = loss*conf - alpha*log(conf)``),
    averaged per loss type, and normalized by the total number of
    (loss, mask) terms.  Optional pose/depth/camera losses are then added
    to the total.
    """

    def __init__(self, pixel_loss, pose_loss=None, cam_loss=None, depth_loss=None,
                 vggt_pose=None, vggt_fov=None, alpha=1, conf_keys=None, quantile=False):
        super().__init__()
        assert alpha > 0
        self.alpha = alpha
        # per-pixel (unreduced) losses are required for confidence weighting
        self.pixel_loss = pixel_loss.with_reduction("none")
        self.pose_loss = pose_loss
        self.cam_loss = cam_loss
        self.depth_loss = depth_loss
        self.vggt_pose = vggt_pose
        self.vggt_fov = vggt_fov
        self.vggt = False
        # if True, pixels whose conf-loss exceeds the per-batch quantile
        # threshold (read from gts[0]['quantile']) are dropped before averaging
        self.quantile = quantile
        if conf_keys is None:
            self.conf_keys = {'global': "conf", 'local': "conf_self", 'depth': 'conf_depth'}
        else:
            self.conf_keys = conf_keys
            # non-default global conf key signals the VGGT-style head layout
            if conf_keys['global'] != "conf":
                self.vggt = True

    def get_name(self):
        # BUGFIX: previously reported "ConfLossMultiviewV2(...)" (copy-paste
        # from the sibling class), which made logs ambiguous.
        return f"ConfLossMultiviewPose({self.pixel_loss})"

    def get_conf_log(self, x):
        # conf must be strictly positive since its log is taken
        return x, torch.log(x)

    def compute_loss(self, gts, preds, **kw):
        """Return (total weighted loss, details dict)."""
        # compute per-pixel loss for all views
        total_loss, details = self.pixel_loss(gts, preds, **kw)
        norm_factor_pr, norm_factor_gt = self.pixel_loss.norm_factor_pr, self.pixel_loss.norm_factor_gt
        total_conf_loss = 0
        conf_details = {}
        self_name = type(self).__name__

        counts = {}   # per-loss-type view counter (also indexes `preds`)
        closses = {}  # per-loss-type accumulated conf-loss (for averaging)
        for loss, mask, loss_type in total_loss:
            # Route to the confidence map matching the loss type: the
            # depth-derived types are the ones containing a 'd'
            # (e.g. "dglobal", "dlocal").
            if 'd' in loss_type:
                conf_key = self.conf_keys['depth']
            elif 'global' in loss_type:
                conf_key = self.conf_keys['global']
            else:
                conf_key = self.conf_keys['local']
            if loss_type not in counts:
                counts[loss_type] = 0
                closses[loss_type] = 0
            conf, log_conf = self.get_conf_log(preds[counts[loss_type]][conf_key][mask])
            conf_loss = loss * conf - self.alpha * log_conf

            if self.quantile:
                # drop the highest-loss pixels above the requested quantile
                quantile_thresh = torch_quantile(conf_loss.detach(), gts[0]['quantile'][0])
                conf_loss = conf_loss[conf_loss < quantile_thresh]

            # nan protection in case of no valid pixels at all
            conf_loss = conf_loss.mean() if conf_loss.numel() > 0 else 0

            conf_details[self_name + f"_conf_loss_{loss_type}/{counts[loss_type]:02d}"] = float(conf_loss)
            closses[loss_type] += float(conf_loss)
            counts[loss_type] += 1

            total_conf_loss += conf_loss

        counts_sum = 0
        for k, v in closses.items():
            details[k] = v / counts[k]
            counts_sum += counts[k]
        # Normalize by the total number of (loss, mask) terms.
        # BUGFIX: guard against division by zero when no terms were produced.
        if counts_sum > 0:
            total_conf_loss /= counts_sum

        details.update(conf_details)

        if self.pose_loss is not None:
            pose_loss, pose_detail = self.pose_loss(
                gts, preds,
                norm_factor_pr=norm_factor_pr,
                norm_factor_gt=norm_factor_gt,
                norm_pose_separately=False,
                # only align poses when the pixel loss is not already relative
                align=not self.pixel_loss.relative,
                **kw)
            total_conf_loss += pose_loss
            details |= pose_detail
        if self.depth_loss is not None:
            depth_loss, depth_detail = self.depth_loss(
                gts, preds,
                norm_factor_pr=self.pixel_loss.norm_depth_factor_pr,
                norm_factor_gt=self.pixel_loss.norm_depth_factor_gt,
                **kw)
            total_conf_loss += depth_loss
            details |= depth_detail
        if self.cam_loss is not None:
            fov_err, fov_detail = self.cam_loss(gts, preds, **kw)
            total_conf_loss += fov_err
            details |= fov_detail
        return total_conf_loss, details
    
class ConfLoss(MultiLoss):
    """Weighted regression by learned confidence (two-view version).

    The wrapped ``pixel_loss`` must produce a pixel-level regression loss;
    each pixel's loss is re-weighted by the predicted confidence:

        high confidence, conf = 0.1 ==> conf_loss = x / 10 + alpha*log(10)
        low  confidence, conf = 10  ==> conf_loss = x * 10 - alpha*log(10)

    alpha: hyperparameter controlling the log-confidence regularizer.
    """

    def __init__(self, pixel_loss, alpha=1):
        super().__init__()
        assert alpha > 0
        self.alpha = alpha
        # confidence weighting needs the unreduced per-pixel losses
        self.pixel_loss = pixel_loss.with_reduction("none")

    def get_name(self):
        return f"ConfLoss({self.pixel_loss})"

    def get_conf_log(self, x):
        # conf must be strictly positive since its log is taken
        return x, torch.log(x)

    def compute_loss(self, gt1, gt2, pred1, pred2, **kw):
        """Return (conf-weighted loss over both images, details dict)."""
        # per-pixel losses and their validity masks for each image
        pixelwise, details = self.pixel_loss(gt1, gt2, pred1, pred2, **kw)
        (loss_a, mask_a), (loss_b, mask_b) = pixelwise

        if loss_a.numel() == 0:
            print("NO VALID POINTS in img1", force=True)
        if loss_b.numel() == 0:
            print("NO VALID POINTS in img2", force=True)

        def _weighted_mean(loss, pred, mask):
            # conf-weighted loss with nan protection (no valid pixels -> 0)
            conf, log_conf = self.get_conf_log(pred["conf"][mask])
            weighted = loss * conf - self.alpha * log_conf
            return weighted.mean() if weighted.numel() > 0 else 0

        conf_loss_a = _weighted_mean(loss_a, pred1, mask_a)
        conf_loss_b = _weighted_mean(loss_b, pred2, mask_b)

        return conf_loss_a + conf_loss_b, dict(
            conf_loss_1=float(conf_loss_a), conf_loss2=float(conf_loss_b), **details
        )

def torch_quantile(
    input: torch.Tensor,
    q: float | torch.Tensor,
    dim: int | None = None,
    keepdim: bool = False,
    *,
    interpolation: str = "nearest",
    out: torch.Tensor | None = None,
) -> torch.Tensor:
    """Better torch.quantile for one SCALAR quantile.
    From VGGT loss

    Using torch.kthvalue. Better than torch.quantile because:
        - No 2**24 input size limit (pytorch/issues/67592),
        - Much faster, at least on big input sizes.

    Arguments:
        input (torch.Tensor): See torch.quantile.
        q (float): See torch.quantile. Supports only scalar input
            currently.
        dim (int | None): See torch.quantile.
        keepdim (bool): See torch.quantile. Supports only False
            currently.
        interpolation: {"nearest", "lower", "higher"}
            See torch.quantile.
        out (torch.Tensor | None): See torch.quantile. Supports only
            None currently.
    """
    # https://github.com/pytorch/pytorch/issues/64947
    # Sanitization: q
    try:
        q = float(q)
        assert 0 <= q <= 1
    except Exception:
        raise ValueError(f"Only scalar input 0<=q<=1 is currently supported (got {q})!")

    # Sanitization: dim
    # Because one cannot pass  `dim=None` to `squeeze()` or `kthvalue()`
    if dim_was_none := dim is None:
        dim = 0
        input = input.reshape((-1,) + (1,) * (input.ndim - 1))

    # Sanitization: inteporlation
    if interpolation == "nearest":
        inter = round
    elif interpolation == "lower":
        inter = floor
    elif interpolation == "higher":
        inter = ceil
    else:
        raise ValueError(
            "Supported interpolations currently are {'nearest', 'lower', 'higher'} "
            f"(got '{interpolation}')!"
        )

    # Sanitization: out
    if out is not None:
        raise ValueError(f"Only None value is currently supported for out (got {out})!")

    # Logic
    k = inter(q * (input.shape[dim] - 1)) + 1
    out = torch.kthvalue(input, k, dim, keepdim=True, out=out)[0]

    # Rectification: keepdim
    if keepdim:
        return out
    if dim_was_none:
        return out.squeeze()
    else:
        return out.squeeze(dim)

    return out
