# Copyright (C) 2024-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
#
# --------------------------------------------------------
# modified from DUSt3R

import sys
import torch
import torch.nn.functional as F


def postprocess(out, depth_mode, conf_mode, pos_z=False):
    """
    Extract dense 3D points (and optional confidence) from a prediction head output.

    Args:
        out: head output of shape B,C,H,W (first 3 channels = xyz, channel 3 = conf).
        depth_mode: (mode_str, vmin, vmax) tuple passed to reg_dense_depth.
        conf_mode: (mode_str, vmin, vmax) tuple for reg_dense_conf, or None to skip.
        pos_z: forwarded to reg_dense_depth.

    Returns:
        dict with key "pts3d" (B,H,W,3) and, when conf_mode is given, "conf".
    """
    # move channels last: B,C,H,W -> B,H,W,C
    fmap = out.permute(0, 2, 3, 1)
    result = {"pts3d": reg_dense_depth(fmap[..., 0:3], mode=depth_mode, pos_z=pos_z)}
    if conf_mode is not None:
        result["conf"] = reg_dense_conf(fmap[..., 3], mode=conf_mode)
    return result

def postprocess_depth(out, depth_mode, conf_mode, pos_z=False):
    """
    Extract a dense depth map (and optional confidence) from a prediction head output.

    Args:
        out: head output of shape B,C,H,W (channel 0 = depth, channel 1 = conf).
        depth_mode: (mode_str, vmin, vmax) tuple passed to reg_dense_depth.
        conf_mode: (mode_str, vmin, vmax) tuple for reg_dense_conf, or None to skip.
        pos_z: forwarded to reg_dense_depth.

    Returns:
        dict with key "depth" and, when conf_mode is given, "conf".
    """
    # move channels last: B,C,H,W -> B,H,W,C
    fmap = out.permute(0, 2, 3, 1)
    result = {"depth": reg_dense_depth(fmap[..., 0], mode=depth_mode, pos_z=pos_z)}
    if conf_mode is not None:
        result["conf"] = reg_dense_conf(fmap[..., 1], mode=conf_mode)
    return result

def postprocess_rgb(out, depth_mode=None, conf_mode=None, eps=1e-6, **kwargs):
    """
    Map a prediction head output to RGB values in the open interval (-1, 1).

    depth_mode, conf_mode and kwargs are accepted only for interface
    compatibility with the other postprocess_* functions and are ignored.
    """
    # move channels last: B,C,H,W -> B,H,W,3
    fmap = out.permute(0, 2, 3, 1)
    # sigmoid squashed into (eps, 1 - eps), then rescaled to (-1, 1)
    squashed = torch.sigmoid(fmap) * (1 - 2 * eps) + eps
    return dict(rgb=(squashed - 0.5) * 2)


def postprocess_camera(out: torch.Tensor, mode_config, inverse: bool = False, fov: bool = False) -> torch.Tensor:
    """
    Extract and process a camera pose (translation + quaternion) and,
    optionally, FOVs from a prediction head output.

    The translation is rescaled according to mode_config, quaternions are
    normalized and standardized, and FOVs (when requested) are mapped to
    positive values with softplus.

    Args:
        out: Prediction tensor whose last dimension is 9, ordered as
            [translation (3), quaternion (4), fovs (2)].
        mode_config: Tuple (mode_str, vmin, vmax). mode_str selects the
            translation transform ("linear", "square", "exp"); vmin/vmax must
            be -inf/+inf (bounded configurations are rejected by assertion).
        inverse: Apply the inverse transform in "square"/"exp" modes.
        fov: If True, also process and append the two FOV channels.

    Returns:
        Tensor with last dimension 9 ([trans, quats, fovs]) when fov is True,
        otherwise 7 ([trans, quats]).

    Raises:
        ValueError: If the last dimension of `out` is not 9.
    """
    mode_str, vmin, vmax = mode_config

    # Only unbounded configurations are supported.
    no_bounds = (vmin == -float("inf")) and (vmax == float("inf"))
    assert no_bounds, "Bounds must be [-inf, +inf] due to original assertion logic for this function."

    if out.shape[-1] != 9:
        raise ValueError(
            f"Last dimension of 'out' tensor must be 9 (for trans, quats, fovs), but got {out.shape[-1]}"
        )

    trans = out[..., 0:3]
    quats = out[..., 3:7]

    if mode_str == "square":
        # rescale so |t| -> |t|^2 (or the inverse mapping)
        d = trans.norm(dim=-1, keepdim=True)
        scale = d / d.square().clip(min=1e-8) if inverse else d.square() / d.clip(min=1e-8)
        trans = trans * scale
    elif mode_str == "exp":
        # expm1(d) = e^d - 1, so |t| -> e^|t| - 1 (or the inverse mapping)
        d = trans.norm(dim=-1, keepdim=True)
        scale = d / torch.expm1(d).clip(min=1e-8) if inverse else torch.expm1(d) / d.clip(min=1e-8)
        trans = trans * scale
    # NOTE: "linear" (and any unrecognized mode) leaves the translation
    # unchanged — preserved from the original behavior.

    quats = standardize_quaternion(quats)

    if not fov:
        return torch.cat([trans, quats], dim=-1)

    # Softplus keeps FOVs strictly positive (removed the dead `if False:`
    # torch.exp branch from the original — it could never execute).
    fovs = F.softplus(out[..., 7:9])
    return torch.cat([trans, quats, fovs], dim=-1)

# NOTE: standardize_quaternion (used above) is defined at the bottom of this file.
def postprocess_pose(out, mode, inverse=False):
    """
    from CUT3R
    Extract a pose (translation + standardized quaternion) from a prediction
    head output.

    Args:
        out: tensor whose last dimension holds [translation (3), quaternion (4)].
        mode: (mode_str, vmin, vmax) tuple; mode_str is one of
            "linear", "square", "exp", "inv_log". vmin/vmax must be -inf/+inf.
        inverse: apply the inverse transform in "square"/"exp" modes.

    Returns:
        For "linear": the (unbounded) translation only, shape (..., 3) —
        preserved from the original behavior.
        Otherwise: concatenated [trans, quats], shape (..., 7).

    Raises:
        ValueError: for an unknown mode_str (the original crashed with a
        NameError on the undefined `scale` in that case).
    """
    mode, vmin, vmax = mode

    no_bounds = (vmin == -float("inf")) and (vmax == float("inf"))
    assert no_bounds
    trans = out[..., 0:3]
    quats = out[..., 3:7]

    if mode == "linear":
        # NOTE: linear mode returns only the translation, without quaternions.
        if no_bounds:
            return trans  # [-inf, +inf]
        return trans.clip(min=vmin, max=vmax)

    if mode == "inv_log":
        # elementwise |t| -> e^|t| - 1, sign preserved
        trans = torch.sign(trans) * torch.expm1(torch.abs(trans))
    else:
        d = trans.norm(dim=-1, keepdim=True)
        if mode == "square":
            scale = d / d.square().clip(min=1e-8) if inverse else d.square() / d.clip(min=1e-8)
        elif mode == "exp":
            scale = d / torch.expm1(d).clip(min=1e-8) if inverse else torch.expm1(d) / d.clip(min=1e-8)
        else:
            raise ValueError(f"bad {mode=}")
        trans = trans * scale

    quats = standardize_quaternion(quats)

    return torch.cat([trans, quats], dim=-1)


def postprocess_pose_conf(out):
    """Squash a B,1,H,W pose-confidence map into (0, 1) via sigmoid, channels last."""
    channels_last = out.permute(0, 2, 3, 1)  # B,H,W,1
    return {"pose_conf": torch.sigmoid(channels_last)}


def postprocess_desc(out, depth_mode, conf_mode, desc_dim, double_channel=False):
    """
    Extract 3D points, confidences and feature descriptors from a prediction
    head output.

    Channel layout (channels-last): [xyz(3), conf?(1), xyz_self(3)?, conf_self(1)?,
    desc(desc_dim), desc_conf(1)], where optional slots depend on conf_mode and
    double_channel.
    """
    fmap = out.permute(0, 2, 3, 1)  # B,C,H,W -> B,H,W,C
    has_conf = int(conf_mode is not None)

    res = {"pts3d": reg_dense_depth(fmap[..., 0:3], mode=depth_mode)}
    if conf_mode is not None:
        res["conf"] = reg_dense_conf(fmap[..., 3], mode=conf_mode)

    if double_channel:
        # second set of points ("self" view) sits right after the first set
        # (and its confidence channel, when present)
        res["pts3d_self"] = reg_dense_depth(
            fmap[..., 3 + has_conf : 6 + has_conf], mode=depth_mode
        )
        if conf_mode is not None:
            res["conf_self"] = reg_dense_conf(fmap[..., 6 + has_conf], mode=conf_mode)

    # descriptors start after all point/confidence channels
    start = 3 + has_conf + int(double_channel) * (3 + has_conf)
    res["desc"] = reg_desc(fmap[..., start : start + desc_dim], mode="norm")
    res["desc_conf"] = reg_dense_conf(fmap[..., start + desc_dim], mode=conf_mode)
    # sanity: the layout above must consume every channel
    assert start + desc_dim + 1 == fmap.shape[-1]

    return res


def reg_desc(desc, mode="norm"):
    """L2-normalize descriptors along the last (channel) dimension."""
    if "norm" not in mode:
        raise ValueError(f"Unknown desc mode {mode}")
    return desc / desc.norm(dim=-1, keepdim=True)


def reg_dense_depth(xyz, mode, pos_z=False):
    """
    Extract 3D points from prediction head output.

    Args:
        xyz: raw point tensor, last dimension holds the coordinates.
        mode: (mode_str, vmin, vmax); mode_str in {"linear", "inv_log",
            "square", "exp"}. vmin/vmax must be -inf/+inf.
        pos_z: if True (only relevant for "square"/"exp"), flip each point so
            its last coordinate is non-negative before rescaling.

    Returns:
        The transformed point tensor.

    Raises:
        ValueError: for an unknown mode_str.
    """
    mode, vmin, vmax = mode

    no_bounds = (vmin == -float("inf")) and (vmax == float("inf"))
    assert no_bounds

    if mode == "linear":
        if no_bounds:
            return xyz  # [-inf, +inf]
        return xyz.clip(min=vmin, max=vmax)
    if mode == "inv_log":
        # elementwise |x| -> e^|x| - 1, sign preserved
        return torch.sign(xyz) * (torch.expm1(torch.abs(xyz)))
    if pos_z:
        sign = torch.sign(xyz[..., -1:])
        # out-of-place multiply: the original used `xyz *= sign`, which
        # mutated the caller's tensor (a view of the head output) in place
        xyz = xyz * sign
    d = xyz.norm(dim=-1, keepdim=True)
    xyz = xyz / d.clip(min=1e-8)

    if mode == "square":
        # |x| -> |x|^2 along the unit direction
        return xyz * d.square()

    if mode == "exp":
        # |x| -> e^|x| - 1 along the unit direction
        return xyz * torch.expm1(d)

    raise ValueError(f"bad {mode=}")


def reg_dense_conf(x, mode):
    """
    Map raw confidence activations into [vmin, vmax].

    mode is a (mode_str, vmin, vmax) tuple; mode_str is "exp" or "sigmoid".
    """
    mode, vmin, vmax = mode
    if mode == "sigmoid":
        # affine-rescaled sigmoid spans (vmin, vmax)
        return (vmax - vmin) * torch.sigmoid(x) + vmin
    if mode == "exp":
        # exp shifted by vmin, capped so the result never exceeds vmax
        return vmin + x.exp().clip(max=vmax - vmin)
    raise ValueError(f"bad {mode=}")


def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Normalize quaternions to unit length and put them in standard form,
    i.e. with a non-negative real (first) component.

    Args:
        quaternions: Quaternions with real part first, shape (..., 4).

    Returns:
        Unit quaternions with non-negative real part, shape (..., 4).
    """
    unit = F.normalize(quaternions, p=2, dim=-1)
    # q and -q represent the same rotation; pick the one with w >= 0
    needs_flip = unit[..., 0:1] < 0
    return torch.where(needs_flip, -unit, unit)
