
import sys
from tkinter import S
from typing import Tuple
from xxlimited import Str
import torch
import torch.nn as nn

from fast3r.dust3r.utils.geometry import inv
from fast3r.dust3r.heads.camera import relative_pose_absT_quatR, camera_to_pose_encoding
from fast3r.dust3r.losses import Criterion, MultiLoss, LLoss, L21Loss, torch_quantile

from fast3r.utils import pylogger
log = pylogger.RankedLogger(__name__, rank_zero_only=True)

class DepthScaleShiftInvLoss(LLoss):
    """L1 depth loss that is invariant to per-sample scale and shift.

    Before comparison, prediction and ground truth are each (optionally)
    normalized per batch element: centered by the mean and divided by the
    mean absolute deviation, both computed over valid (masked) pixels only.
    The loss is then the element-wise absolute difference at valid pixels.
    """

    def __init__(self, reduction="none", norm=True):
        super().__init__(reduction)
        # When True, apply per-sample scale/shift normalization first.
        self.norm = norm

    def forward(self, pred, gt, mask):
        # pred/gt are expected as (B, H, W) depth maps; mask is boolean.
        assert pred.shape == gt.shape and pred.ndim == 3, f"Bad shape = {pred.shape}"
        dist = self.distance(pred, gt, mask)
        mode = self.reduction
        if mode == "none":
            return dist
        elif mode == "sum":
            return dist.sum()
        elif mode == "mean":
            if dist.numel() > 0:
                return dist.mean()
            # Empty selection: return a zero on the right device/dtype.
            return dist.new_zeros(())
        raise ValueError(f"bad {self.reduction=} mode")

    def normalize(self, x, mask):
        """Center and scale each batch element using its valid pixels only."""
        # Split the flat masked values back into per-sample chunks.
        counts = mask.sum(dim=(1, 2)).tolist()
        per_sample = torch.split(x[mask], counts)
        shift = torch.stack([v.mean() for v in per_sample])
        scale = torch.stack(
            [(v - m).abs().mean() for v, m in zip(per_sample, shift)]
        )
        shift = shift.view(-1, 1, 1)
        # Clamp guards against a degenerate (constant) depth map.
        scale = scale.view(-1, 1, 1).clamp(min=1e-6)
        return (x - shift) / scale

    def distance(self, pred, gt, mask):
        """Masked absolute difference, after optional normalization."""
        if self.norm:
            pred = self.normalize(pred, mask)
            gt = self.normalize(gt, mask)
        return (pred - gt)[mask].abs()

from fast3r.dust3r.loss.vggt_loss import reg_loss2, check_and_fix_inf_nan, normalize_pointcloud
class ConfDepthLoss(MultiLoss):
    """Confidence-weighted depth regression loss over multiple views.

    The per-pixel regression error from ``reg_loss2`` is weighted by the
    predicted confidence and regularized with ``-alpha * log(conf)``; the
    highest-loss pixels beyond a per-batch quantile threshold are dropped
    before averaging. A gradient term returned by the pixel loss is added
    on top. The total confidence loss is scaled by ``weight``.
    """

    def __init__(self, pixel_loss=None, alpha=0.1, weight: float = 1.0,
                 pred_key='depth_vggt', conf_key='conf_depth_vggt',
                 gradient_loss='grad', normalize=True):
        super().__init__()
        assert alpha > 0
        self.alpha = alpha
        # NOTE(review): the passed-in pixel_loss is ignored; reg_loss2 is
        # always used. Parameter kept for interface compatibility.
        self.pixel_loss = reg_loss2
        self.weight = weight
        self.pred_key = pred_key
        self.conf_key = conf_key
        self.gradient_loss = gradient_loss
        self.normalize = normalize

    def get_name(self):
        return 'depth'

    def get_conf_log(self, x):
        """Return ``(conf, log(conf))`` for the confidence regularizer."""
        return x, torch.log(x)

    def compute_loss(self, gts, preds, norm_factor_pr=1.0,
                     norm_factor_gt=1.0, **kw):
        """Compute the weighted loss and a dict of monitoring scalars.

        Args:
            gts: per-view ground-truth dicts with 'depthmap', 'valid_mask',
                'is_metric' and 'quantile' entries.
            preds: per-view prediction dicts holding ``self.pred_key`` and
                ``self.conf_key`` tensors.
            norm_factor_pr / norm_factor_gt: per-batch normalization factors;
                a python scalar, a (B,) tensor, or None to skip normalization.

        Returns:
            (total_loss, {'depth': ..., 'cdepth': ..., 'grad': ...})
        """
        mask_gt = torch.stack([gt["valid_mask"] for gt in gts], dim=1)
        depth_gt = torch.stack([gt['depthmap'] for gt in gts], dim=1)[..., None]
        is_metric = torch.stack([gt['is_metric'] for gt in gts], dim=1)

        pred_ndim = preds[0][self.pred_key].ndim
        if pred_ndim == 3:
            depth_pred = torch.stack([pred[self.pred_key] for pred in preds], dim=1)
        elif pred_ndim == 4:
            # Last channel of the prediction holds the depth value.
            depth_pred = torch.stack([pred[self.pred_key][..., -1] for pred in preds], dim=1)
        else:
            # BUGFIX: previously this branch was commented out, leaving
            # depth_pred undefined (NameError) on unexpected shapes.
            raise NotImplementedError(f"unexpected {self.pred_key} ndim={pred_ndim}")
        conf_pred = torch.stack([pred[self.conf_key] for pred in preds], dim=1)
        if depth_pred.ndim == 4:
            depth_pred = depth_pred[..., None]  # add trailing channel dim

        if norm_factor_gt is not None:
            B = depth_gt.shape[0]
            n_dims = depth_gt.ndim
            # BUGFIX: the defaults are python floats, which previously
            # crashed on ``.view``; accept scalars as well as (B,) tensors.
            if not torch.is_tensor(norm_factor_gt):
                norm_factor_gt = depth_gt.new_full((B,), float(norm_factor_gt))
            if not torch.is_tensor(norm_factor_pr):
                norm_factor_pr = depth_pred.new_full((B,), float(norm_factor_pr))
            depth_gt = depth_gt / norm_factor_gt.view([B] + [1] * (n_dims - 1))
            depth_pred = depth_pred / norm_factor_pr.view([B] + [1] * (n_dims - 1))

        loss_reg, loss_grad = self.pixel_loss(
            depth_pred, depth_gt, mask_gt, gradient_loss=self.gradient_loss
        )

        conf, log_conf = self.get_conf_log(conf_pred[mask_gt])
        # Confidence-weighted error with a log-confidence regularizer.
        conf_loss = loss_reg * conf - self.alpha * log_conf

        # Drop the highest-loss tail beyond the requested quantile
        # (robustness to outliers / bad GT pixels).
        quantile_thresh = torch_quantile(conf_loss.detach(), gts[0]['quantile'][0])
        conf_loss = conf_loss[conf_loss < quantile_thresh]
        # BUGFIX: return a tensor zero (right device/dtype) instead of the
        # python int 0 when every pixel was filtered out.
        conf_loss = conf_loss.mean() if conf_loss.numel() > 0 else loss_reg.new_zeros(())

        return self.weight * (conf_loss + loss_grad), {
            'depth': loss_reg.mean(), 'cdepth': conf_loss, 'grad': loss_grad
        }
