



import numpy as np

from os.path import join, split, exists,dirname

import numpy as np
from typing import Literal
import numpy as np
from typing import Literal, Tuple

import json
from loguru import logger





def normalize_depth(inverse_depth):
    """Normalize an inverse-depth map to [0, 1] for visualization.

    The normalization range is clamped to inverse depths corresponding to
    metric depths in [0.1, 250]: values nearer than 0.1 or farther than 250
    do not stretch the range.

    Args:
        inverse_depth (numpy.ndarray): Inverse-depth map (1 / depth).

    Returns:
        numpy.ndarray: Same shape as input, linearly rescaled so that the
        clamped [min, max] range maps to [0, 1]. Pixels outside the clamped
        range can fall outside [0, 1].
    """
    # Clamp the visualization range: nearest 0.1 m, farthest 250 m.
    max_invdepth_vizu = min(inverse_depth.max(), 1 / 0.1)
    min_invdepth_vizu = max(1 / 250, inverse_depth.min())
    # Guard against a zero span (constant input), which previously produced
    # a division by zero; for non-degenerate inputs the denominator is
    # unchanged.
    span = max(max_invdepth_vizu - min_invdepth_vizu, 1e-12)
    inverse_depth_normalized = (inverse_depth - min_invdepth_vizu) / span

    return inverse_depth_normalized



def align_scale_shift(pred,  target):
    """Globally align `pred` to `target` with an affine (scale, shift) fit.

    Valid pixels are those where `target > 0`. A degree-1 least-squares fit
    over the valid pixels supplies (scale, shift). If the fitted scale comes
    out negative, a robust median-ratio scale with zero shift is used
    instead. With 10 or fewer valid pixels the prediction is left unscaled
    (scale=1, shift=0).

    Returns:
        tuple: (aligned prediction, scale factor).
    """
    valid = target > 0
    gt_vals = target[valid]
    pred_vals = pred[valid]

    if valid.sum() <= 10:
        # Too few samples for a meaningful fit — leave prediction as-is.
        scale, shift = 1, 0
    else:
        scale, shift = np.polyfit(pred_vals, gt_vals, deg=1)
        if scale < 0:
            # Degenerate fit; fall back to a median-ratio scale.
            scale = np.median(gt_vals) / (np.median(pred_vals) + 1e-8)
            shift = 0

    return pred * scale + shift, scale

def _estimate_scale(
    pred: np.ndarray,
    gt:   np.ndarray,
    mask: np.ndarray,
    mode: Literal["median", "ls"] = "median",
) -> float:
    """返回把 pred 缩放到 gt 的比例因子 s."""
    if mode == "median":
        s = np.median(gt[mask]) / np.median(pred[mask])
    elif mode == "ls":
        num = (pred[mask] * gt[mask]).sum()
        den = (pred[mask] ** 2).sum()
        s = num / max(den, 1e-12)
    else:
        raise ValueError("mode must be 'median' or 'ls'")
    return float(s)

def depth_error_map_si(
    pred: np.ndarray,
    gt:   np.ndarray,
    method: Literal["abs", "sq", "rel", "log"] = "abs",
    scale_mode: Literal["median", "ls"] = "median",
    valid_range: Tuple[float, float] | None = (1e-3, np.inf),
    eps: float = 1e-6,
) -> np.ndarray:
    """
    Compute a scale-aligned error map between predicted and ground-truth
    depth maps.

    Parameters
    ----------
    pred, gt : np.ndarray  (H, W)
        Predicted / ground-truth depth maps. Units may differ as long as
        the two are proportional.
    method : {"abs","sq","rel","log"}
        Error metric: absolute, squared, relative, or log-space difference.
    scale_mode : {"median","ls"}
        How the global scale factor is estimated (see `_estimate_scale`).
    valid_range : (lo, hi) or None
        Valid depth interval; pixels outside it are excluded from scale
        estimation. Pass None to disable range filtering.
    eps : float
        Guards against division by zero and log(0).

    Returns
    -------
    np.ndarray
        Error map with the same shape as the inputs, computed after scale
        alignment.
    """
    if pred.shape != gt.shape:
        raise ValueError(f"shape mismatch: {pred.shape} vs {gt.shape}")

    pred = pred.astype(np.float32)
    gt   = gt.astype(np.float32)

    # 1) Mask of pixels usable for scale estimation.
    if valid_range is None:
        mask = np.ones_like(gt, dtype=bool)
    else:
        lo, hi = valid_range
        mask = (gt > lo) & (pred > lo) & (gt < hi) & (pred < hi)

    if mask.sum() == 0:
        raise ValueError("No valid pixels for scale estimation.")

    # 2) Estimate the global scale and align the prediction.
    pred_aligned = pred * _estimate_scale(pred, gt, mask, scale_mode)

    # 3) Per-pixel error under the chosen metric.
    diff = pred_aligned - gt
    if method == "abs":
        return np.abs(diff)
    if method == "sq":
        return diff ** 2
    if method == "rel":
        return np.abs(diff) / np.maximum(gt, eps)
    if method == "log":
        return np.abs(
            np.log(np.maximum(pred_aligned, eps)) - np.log(np.maximum(gt, eps))
        )
    raise ValueError("Unknown method.")



def compute_errors(gt, pred):
    """Compute metrics for 'pred' compared to 'gt'

    Args:
        gt (numpy.ndarray): Ground truth values
        pred (numpy.ndarray): Predicted values

        gt.shape should be equal to pred.shape

    Returns:
        dict: Dictionary containing the following metrics:
            'a1': Delta1 accuracy: Fraction of pixels that are within a scale factor of 1.25
            'a2': Delta2 accuracy: Fraction of pixels that are within a scale factor of 1.25^2
            'a3': Delta3 accuracy: Fraction of pixels that are within a scale factor of 1.25^3
            'abs_rel': Absolute relative error
            'rmse': Root mean squared error
            'log_10': Absolute log10 error
            'sq_rel': Squared relative error
            'rmse_log': Root mean squared error on the log scale
            'silog': Scale invariant log error
    """
    thresh = np.maximum((gt / pred), (pred / gt))
    a1 = (thresh < 1.25).mean()
    a2 = (thresh < 1.25 ** 2).mean() #*  1.5625
    a3 = (thresh < 1.25 ** 3).mean() #* 1.953125

    abs_rel = np.mean(np.abs(gt - pred) / gt)
    sq_rel = np.mean(((gt - pred) ** 2) / gt)

    rmse = (gt - pred) ** 2
    rmse = np.sqrt(rmse.mean())

    rmse_log = (np.log(gt) - np.log(pred)) ** 2
    rmse_log = np.sqrt(rmse_log.mean())

    err = np.log(pred) - np.log(gt)
    silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
    log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()

    # return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
    #             silog=silog, sq_rel=sq_rel)
    return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, )




if __name__ =="__main__":

    loader = InferenceLoader(jsons=['/share/project/cwm/shaocong.xu/exp/ml-depth-pro/data/tricky_nogt_2025/test.jsonl'])

    for x in loader:
        print(x)
        break