import numpy as np
import torch
from PIL import Image

from src.util.depth_transform import DepthNormalizerBase, ScaleShiftDepthNormalizer
from src.util.alignment import align_depth_least_square
from torchvision.transforms.functional import resize

# pred_depth must be in [0, 1]
# If erase is True and the minimum of gt_depth lies within (-eps, eps),
# pred_depth is set to 0 wherever gt_depth < eps.
def aligin_and_erase(gt_depth, pred_depth, mask, erase=True, eps=1e-6):
    """Align a [0, 1]-normalized predicted depth to the ground-truth scale,
    optionally zeroing predictions where the ground truth is (near) zero.

    Args:
        gt_depth: ground-truth depth, array-like of shape (H, W).
        pred_depth: predicted depth in [0, 1], array-like of shape (H, W).
        mask: PIL image or HxWxC tensor/array; pixels whose first channel
            exceeds 120 have their ground-truth value zeroed before the
            scale/shift fit (they still participate in the fit as zeros,
            since valid_mask is all-True).
        erase: when True and min(gt_depth) is within +/-eps of zero, set
            pred_depth to 0 wherever gt_depth < eps.
        eps: tolerance for treating ground-truth values as zero.

    Returns:
        numpy.ndarray: the aligned (and optionally erased) predicted depth.
    """
    # Unpacking also asserts pred_depth is 2-D (H, W).
    H, W = pred_depth.shape
    if isinstance(mask, Image.Image):
        mask = torch.tensor(np.array(mask))
    # Only the first channel matters; values > 120 select pixels to zero.
    mask = mask[:, :, 0] > 120
    # as_tensor avoids the copy/warning torch.tensor() emits when the
    # input is already a tensor; gt_depth is only read below, and the
    # fit operates on an explicit clone.
    gt_depth = torch.as_tensor(gt_depth)
    test_gt_depth = gt_depth.clone().detach()
    test_gt_depth[mask] = 0
    pred_depth = torch.as_tensor(pred_depth)
    # gt_depth is already a tensor here, so no extra wrapping is needed.
    valid_mask = torch.ones_like(gt_depth).bool()
    # Fit the scale/shift that maps the [-1, 1]-normalized gt back onto gt.
    normalizer = ScaleShiftDepthNormalizer(
        norm_min=-1.0, norm_max=1.0, min_max_quantile=0.02, clip=False
    )
    norm_depth = normalizer(test_gt_depth)
    _, scale, shift = align_depth_least_square(
        gt_arr=test_gt_depth, pred_arr=norm_depth, valid_mask_arr=valid_mask
    )
    # Map pred from [0, 1] to [-1, 1], then apply the fitted scale/shift.
    pred_depth = pred_depth * 2 - 1
    pred_depth = pred_depth * scale + shift
    if erase and gt_depth.min() < eps and gt_depth.min() > -eps:
        pred_depth[gt_depth < eps] = 0
    return pred_depth.cpu().numpy()