import sys
from tkinter import S
from typing import Tuple
from xxlimited import Str
import torch
import torch.nn as nn

from src.dust3r.utils.geometry import normalize_pointcloud_group
from fast3r.dust3r.loss.vggt_loss import normalize_pointcloud_nomask
from fast3r.dust3r.utils.geometry import inv
from fast3r.dust3r.heads.camera import relative_pose_absT_quatR, camera_to_pose_encoding, pose_encoding_to_camera, is_valid_camera_pose_torch ,camera_from_preds
from fast3r.dust3r.losses import Criterion, MultiLoss

from fast3r.utils import pylogger
# Module-level logger; rank_zero_only presumably restricts output to the
# rank-0 process in distributed training — TODO confirm in pylogger.
log = pylogger.RankedLogger(__name__, rank_zero_only=True)
class VGGTCameraFOVLoss(MultiLoss):
    """Supervise a predicted field-of-view against the FOV implied by GT intrinsics.

    Each prediction dict must hold a (B, 2) tensor of [fov_h, fov_w] (radians)
    under ``pred_key``; each GT dict must hold "camera_intrinsics" (OpenCV-style
    K matrix) and "img" (only its trailing H, W dims are read).
    """

    def __init__(self, weight: float = 1.0, pred_key='vggt_fov'):
        super().__init__()
        self.weight = weight      # scalar multiplier applied to the FOV error
        self.pred_key = pred_key  # key of the FOV prediction in each pred dict
        self.tflag = True

    def get_name(self):
        """Short identifier used for logging / loss bookkeeping."""
        return 'fov'

    def compute_loss(
        self,
        gts, preds,
        **kw
    ) -> torch.Tensor:
        """Return the weighted mean FOV error across all batch items and views.

        Args:
            gts: list (one per view) of GT dicts with "camera_intrinsics" and "img".
            preds: list (one per view) of prediction dicts with self.pred_key.

        Returns:
            Scalar tensor: weight * mean L2 FOV error.
        """
        loss = self.compute_cam_loss(gts, preds, pred_key=self.pred_key, **kw)
        return self.weight * loss

    @staticmethod
    def intri_to_normalized_focal(intrinsics, image_size_hw):
        """
        Convert intrinsic matrix to normalized focal lengths.

        Args:
            intrinsics: tensor of shape [..., 3, 3], camera intrinsics in
                OpenCV convention (fx at [0, 0], fy at [1, 1]).
            image_size_hw: tuple (height, width) of the image in pixels.

        Returns:
            torch.Tensor: shape [..., 2], normalized focals [fy/H, fx/W],
            ordered [alpha_y, alpha_x] to match the FOV order [fov_h, fov_w].

        Raises:
            ValueError: if the intrinsics shape is invalid or the resolution
                is non-positive.
        """
        H, W = image_size_hw
        if H <= 0 or W <= 0:
            raise ValueError("Image height and width must be positive")

        if intrinsics.shape[-2:] != (3, 3):
            raise ValueError("Intrinsics must have shape [..., 3, 3]")

        fx = intrinsics[..., 0, 0]
        fy = intrinsics[..., 1, 1]

        alpha_x = fx / W
        alpha_y = fy / H

        # [alpha_y, alpha_x] to match the [fov_h, fov_w] ordering of intri_to_fov.
        return torch.stack([alpha_y, alpha_x], dim=-1).float()

    @staticmethod
    def intri_to_fov(intrinsics, image_size_hw):
        """
        Convert intrinsics to a field-of-view encoding [fov_h, fov_w] in radians.

        Args:
            intrinsics: tensor [..., 3, 3], OpenCV convention.
            image_size_hw: tuple (height, width) in pixels.

        Returns:
            torch.Tensor: shape [..., 2] with [fov_h, fov_w], float32.
        """
        H, W = image_size_hw
        fov_h = 2 * torch.atan((H / 2) / intrinsics[..., 1, 1])
        fov_w = 2 * torch.atan((W / 2) / intrinsics[..., 0, 0])
        # Stack on the LAST dim (was dim=1): identical for the usual (B, 3, 3)
        # input, but also correct for extra leading batch dims, and consistent
        # with intri_to_normalized_focal.
        return torch.stack([fov_h, fov_w], dim=-1).float()

    def compute_cam_loss(self,
        gts, preds, pred_key,
        **kw
    ) -> torch.Tensor:
        """Mean L2 distance between predicted and GT [fov_h, fov_w] per view."""
        image_size_hw = gts[0]["img"].shape[-2:]

        cam_preds = [pred[pred_key] for pred in preds]
        cam_gts = [VGGTCameraFOVLoss.intri_to_fov(gt["camera_intrinsics"], image_size_hw) for gt in gts]

        # stack across views: result shape (B, N, 2)
        cam_gts = torch.stack(cam_gts, dim=1)
        cam_preds = torch.stack(cam_preds, dim=1)
        assert len(cam_preds.shape) == 3 and cam_preds.shape[-1] == 2

        cam_err = torch.norm((cam_preds - cam_gts), dim=-1).mean()
        if MultiLoss.first_batch:
            log.info(f"{cam_gts[0, 0, :]} {cam_preds[0, 0, :]} {cam_err}")
        return cam_err
    
class CameraIntrinsicLoss(MultiLoss):
    """Supervise predicted focal lengths against GT camera intrinsics.

    Each prediction dict must hold a (B, 2) tensor of scaled (fx, fy) under
    ``pred_key``; each GT dict must hold "camera_intrinsics". GT focals are
    divided by ``fuv_scaler`` before comparison.
    """

    def __init__(self, weight: float = 1.0, pred_key='vggt_fov', fuv_scaler=256):
        super().__init__()
        self.weight = weight      # scalar multiplier applied to the focal error
        self.pred_key = pred_key  # key of the focal prediction in each pred dict
        self.tflag = True
        # Stored on the MultiLoss base class so sibling loss instances share
        # the same scaling constant (read back in compute_cam_loss).
        MultiLoss.fuv_scaler = fuv_scaler

    def get_name(self):
        """Short identifier used for logging / loss bookkeeping."""
        return 'fuv'

    def compute_loss(
        self,
        gts, preds,
        **kw
    ) -> torch.Tensor:
        """Return the weighted mean focal-length error over batch and views."""
        fuv_err = self.compute_cam_loss(gts, preds, pred_key=self.pred_key, **kw)
        return self.weight * fuv_err

    @staticmethod
    def intri_to_encoding(intrinsics, H=0, W=0):
        """Extract (fx, fy) from K as a float tensor of shape [..., 2].

        H and W are kept for interface compatibility but are unused.
        """
        # dim=-1 (was dim=1): identical for [B, 3, 3] input, but also supports
        # extra leading batch dimensions.
        return torch.stack([intrinsics[..., 0, 0], intrinsics[..., 1, 1]], dim=-1).float()

    def compute_cam_loss(self,
        gts, preds, pred_key,
        **kw
    ) -> torch.Tensor:
        """Mean L2 error between scaled GT (fx, fy) and predictions across views."""
        # NOTE: removed dead `H, W = gts[0].get("true_shape")[0]` — the values
        # were never used and .get() would yield an opaque TypeError when the
        # key is missing.
        cam_preds = [pred[pred_key] for pred in preds]
        cam_gts = [CameraIntrinsicLoss.intri_to_encoding(gt["camera_intrinsics"]) / MultiLoss.fuv_scaler for gt in gts]

        # stack across views: result shape (B, N, 2)
        cam_gts = torch.stack(cam_gts, dim=1)
        cam_preds = torch.stack(cam_preds, dim=1)

        fuv_err = torch.norm((cam_preds - cam_gts), dim=-1).mean()
        if MultiLoss.first_batch:
            # Logs the second view's encoding; assumes N >= 2 views.
            log.info(f"{cam_gts[0, 1, :]} {cam_preds[0, 1, :]} {fuv_err}")
        return fuv_err



class CameraPoseLoss(MultiLoss):
    """
    Standalone camera-pose error loss extracted from CUT3R's Regr3DPoseBatchList.

    Compares per-view camera poses encoded as (translation[3], quaternion[4])
    vectors. Translations are normalized by a per-batch scale factor before
    comparison; optionally a pairwise relative-pose term is added.

    Usage:
        loss_module = CameraPoseLoss(weight=1.0)
        loss = loss_module(gt_poses, pred_poses, pose_masks)
    """
    def __init__(self, weight: float = 1.0, pose_key: str = 'camera_pose', compute_relative: bool = True, alpha: float = 1.0):
        # weight: global multiplier on the combined (trans + alpha * quat) loss.
        # pose_key: key of the predicted pose encoding inside each pred dict.
        # compute_relative: if True, add pairwise relative-pose error terms.
        # alpha: weighting of the quaternion error relative to translation error.
        super().__init__()
        self.weight = weight
        self.pose_key=pose_key
        self.train_flag = True
        self.compute_relative = compute_relative
        self.alpha = alpha

    def get_name(self):
        """Short identifier used for logging / loss bookkeeping."""
        return 'pose'

    def compute_loss(
        self,
        gts, preds,
        **kw
    ):
        """
        Compute the weighted camera-pose loss.

        Args:
            gts: list (one per view) of GT dicts; each must provide
                'camera_pose' and 'is_metric' (both read downstream).
            preds: list (one per view) of prediction dicts containing
                self.pose_key.
        Returns:
            Tuple of (scalar weighted loss, dict with the unweighted 'trans'
            and 'quats' error components for logging).
        """
        trans_err, quat_err = self.compute_pose_loss(gts, preds, pose_key=self.pose_key, compute_relative=self.compute_relative, **kw)

        return self.weight * (trans_err + quat_err * self.alpha), {'trans':trans_err,'quats': quat_err}
    
    @staticmethod
    def process_poses(gts, preds, 
        norm_factor_pr: torch.Tensor = None, 
        norm_factor_gt: torch.Tensor = None, 
        norm_pose_separately: bool = False,
        eps=1e-3,
        pose_key=None,
        align=False,
        **kw):
        """
        Encode GT and predicted camera poses as (translation, quaternion) pairs.

        Args:
            gts: list of GT dicts with 'camera_pose' (format defined by
                camera_to_pose_encoding — presumably a 4x4 pose matrix; TODO
                confirm against that helper).
            preds: list of pred dicts; predicted poses read from pred[pose_key].
            pose_key: key of the predicted pose encoding in each pred dict.
            align: if True, rebuild predicted cameras via camera_from_preds and
                re-encode them; otherwise use pred[pose_key] directly (cloned).
            norm_factor_pr / norm_factor_gt / norm_pose_separately / eps:
                accepted for interface compatibility; unused here (see the
                commented-out normalization/mask code below).
        Returns:
            (gt_poses, pr_poses, masks): each pose list holds per-view
            (trans (B, 3), quat-part (B, 4)) tuples; masks is always None.
        """
        # in_camera1 = inv(gts[0]['camera_pose'])in_camera1 @
        gt_poses = [
            camera_to_pose_encoding( gt['camera_pose'])
            for gt in gts
        ]

        if align:
            pr_poses = camera_from_preds(preds, pose_key)
            pr_poses = [camera_to_pose_encoding(pose) for pose in pr_poses]
        else:                    
            # pr_camera1 = inv(preds[0][pose_key])
            pr_poses = [pred[pose_key].clone() for pred in preds]
        
        # print(norm_factor_gt.shape, norm_factor_pr.shape)
        # pose_norm_factor_gt = norm_factor_gt.squeeze(2, 3)
        # pose_norm_factor_pr = norm_factor_pr.squeeze(2, 3)
        
        # pose_masks = (norm_factor_gt.squeeze() > eps) & (
        #     norm_factor_pr.squeeze() > eps
        # )pose_masks
        # gt_poses[0] = (torch.zeros_like(gt_poses[0][0], dtype=gt_poses[0][0].dtype), gt_poses[0][1])
        # pr_poses[0] = gt_poses[0]
        # Split each (B, 7) encoding into translation (first 3 components)
        # and the remaining quaternion part.
        gt_poses = [
            (gt[:, :3], gt[:, 3:]) for gt in gt_poses
        ]
        pr_poses = [
            (pr[:, :3], pr[:, 3:]) for pr in pr_poses
        ]
        return gt_poses, pr_poses, None
    
    def compute_pose_loss(self,
        gts, preds, compute_relative: bool = True, norm_pose_separately: bool = False,
        norm_factor_pr: torch.Tensor = None, 
        norm_factor_gt: torch.Tensor = None, 
        eps=1e-3,
        **kw
    ):
        """
        Computes mean norm difference in translation and quaternion.

        Translations are divided by per-batch scale factors (derived here via
        get_norm_factor_poses when none are supplied) before the error is
        taken. When compute_relative is True, pairwise relative-pose errors
        are added to both returned terms.

        Returns:
            (trans_err, quat_err) scalar tensors.
        """
        gt_poses, pred_poses, masks = CameraPoseLoss.process_poses(gts, preds, **kw)
        # stack across views: result shape B x N x ...
        gt_trans = torch.stack([t for (t, q) in gt_poses], dim=1)   # (B, N, 3)
        gt_quats = torch.stack([q for (t, q) in gt_poses], dim=1)   # (B, N, 4)
        pr_trans = torch.stack([t for (t, q) in pred_poses], dim=1)  # (B, N, 3)
        pr_quats = torch.stack([q for (t, q) in pred_poses], dim=1)  # (B, N, 4)

        if MultiLoss.first_batch:
            log.info(f"gt_trans {gt_trans[0,-1,:]}, {pr_trans[0,-1,:]}")
            log.info(f'gt_quats {gt_quats[0,-1,:]}, {pr_quats[0,-1,:]}')

        # Derive normalization factors from the camera translations themselves
        # when none were passed in (or when separate normalization is forced).
        if norm_pose_separately or norm_factor_gt is None:
            norm_factor_gt, norm_factor_pr = CameraPoseLoss.get_norm_factor_poses(gt_trans, pr_trans, not_metric_mask=~gts[0]["is_metric"])

        if norm_factor_gt is not None:
            # Reshape factors to (B, 1, 1) so they broadcast over views/coords;
            # clip at eps to avoid division blow-up on tiny scenes.
            B = gt_trans.shape[0]
            n_dims = gt_trans.ndim
            norm_factor_gt2 = norm_factor_gt.clip(eps).view([B] + [1] * (n_dims - 1))
            norm_factor_pr2 = norm_factor_pr.clip(eps).view([B] + [1] * (n_dims - 1))
            gt_trans = gt_trans / norm_factor_gt2
            pr_trans = pr_trans / norm_factor_pr2

        # if masks is None:
        # full-batch
        trans_err = torch.norm(pr_trans[:, :] - gt_trans[:, :], dim=-1).mean()     # (B, N)
        quat_err = torch.norm(pr_quats[:, :] - gt_quats[:, :], dim=-1).mean()       # (B, N-1)
        if compute_relative:
            rel_trans_err, rel_quats_err  = CameraPoseLoss.compute_relative_pose_loss(gt_trans, gt_quats, pr_trans, pr_quats)
        # else:
        #     if not any(masks):
        #         return torch.tensor(0.0, device=gt_trans.device)
        #     # select only valid samples
    #     trans_err = torch.norm(pr_trans[masks] - gt_trans[masks], dim=-1).mean()
    #     quat_err  = torch.norm(pr_quats[masks] - gt_quats[masks], dim=-1).mean()
        #     if compute_relative:
        #         rel_trans_err, rel_quats_err = CameraPoseLoss.compute_relative_pose_loss(gt_trans, gt_quats, pr_trans, pr_quats, masks)

        if compute_relative:
            return trans_err + rel_trans_err, quat_err + rel_quats_err
        else:
            return trans_err, quat_err
    
    @staticmethod
    def compute_relative_pose_loss(
        gt_trans, gt_quats, pr_trans, pr_quats, masks=None
    ):
        """
        Mean error over all N x N ordered view pairs of relative poses.

        Args:
            gt_trans / pr_trans: (B, N, 3) translations.
            gt_quats / pr_quats: (B, N, 4) quaternions.
            masks: optional (B,) boolean batch selection; defaults to all True.
        Returns:
            (rel_trans_err, rel_quats_err): scalar means of the L2 differences
            between GT and predicted relative poses over every ordered pair
            (including i == i, which contributes no error).
        """
        if masks is None:
            masks = torch.ones(len(gt_trans), dtype=torch.bool, device=gt_trans.device)
        # Broadcast to (B, N, N, ·) grids pairing every view i with every
        # view j, then keep only the masked batch items.
        gt_trans_matrix1 = gt_trans[:, :, None, :].repeat(1, 1, gt_trans.shape[1], 1)[
            masks
        ]
        gt_trans_matrix2 = gt_trans[:, None, :, :].repeat(1, gt_trans.shape[1], 1, 1)[
            masks
        ]
        gt_quats_matrix1 = gt_quats[:, :, None, :].repeat(1, 1, gt_quats.shape[1], 1)[
            masks
        ]
        gt_quats_matrix2 = gt_quats[:, None, :, :].repeat(1, gt_quats.shape[1], 1, 1)[
            masks
        ]
        pr_trans_matrix1 = pr_trans[:, :, None, :].repeat(1, 1, pr_trans.shape[1], 1)[
            masks
        ]
        pr_trans_matrix2 = pr_trans[:, None, :, :].repeat(1, pr_trans.shape[1], 1, 1)[
            masks
        ]
        pr_quats_matrix1 = pr_quats[:, :, None, :].repeat(1, 1, pr_quats.shape[1], 1)[
            masks
        ]
        pr_quats_matrix2 = pr_quats[:, None, :, :].repeat(1, pr_quats.shape[1], 1, 1)[
            masks
        ]

        # Relative pose of pair (1, 2) for GT and prediction respectively.
        gt_rel_trans, gt_rel_quats = relative_pose_absT_quatR(
            gt_trans_matrix1, gt_quats_matrix1, gt_trans_matrix2, gt_quats_matrix2
        )
        pr_rel_trans, pr_rel_quats = relative_pose_absT_quatR(
            pr_trans_matrix1, pr_quats_matrix1, pr_trans_matrix2, pr_quats_matrix2
        )
        rel_trans_err = torch.norm(gt_rel_trans - pr_rel_trans, dim=-1)
        rel_quats_err = torch.norm(gt_rel_quats - pr_rel_quats, dim=-1)
        return rel_trans_err.mean(), rel_quats_err.mean()
    
    @staticmethod
    def get_norm_factor_poses(gt_trans, pr_trans, not_metric_mask=None, norm_mode='avg_dis', gt_scale=False):
        """
        Per-batch scale factors for GT and predicted camera translations.

        Args:
            gt_trans / pr_trans: (B, N, 3) camera translations.
            not_metric_mask: (B,) bool; for these batch items the prediction
                gets its own scale instead of reusing the GT one.
            norm_mode: accepted for interface compatibility; unused here.
            gt_scale: if True, skip normalization (factors of 1).
        Returns:
            (norm_factor_gt, norm_factor_pr) per-batch scale tensors
            (presumably shape (B,) — as produced by
            normalize_pointcloud_nomask with return_pts=False; TODO confirm).
        """

        if not gt_scale:
            assert gt_trans.ndim == 3
            norm_factor_gt = normalize_pointcloud_nomask(
                                gt_trans,
                                return_pts=False,
                            )
        else:
            norm_factor_gt = torch.ones(
                len(gt_trans), dtype=gt_trans[0].dtype, device=gt_trans[0].device
            )
        norm_factor_pr = norm_factor_gt.clone()
        # Non-metric samples: scale predictions independently so the loss is
        # scale-invariant for them.
        if not gt_scale and not_metric_mask is not None and not_metric_mask.sum() > 0:
            norm_factor_pr[not_metric_mask] = normalize_pointcloud_nomask(
                                pr_trans[not_metric_mask],
                                return_pts=False,
                            )
        return norm_factor_gt, norm_factor_pr