import numpy as np
import torch
import cv2

# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# More information about Marigold:
#   https://marigoldmonodepth.github.io
#   https://marigoldcomputervision.github.io
# Efficient inference pipelines are now part of diffusers:
#   https://huggingface.co/docs/diffusers/using-diffusers/marigold_usage
#   https://huggingface.co/docs/diffusers/api/pipelines/marigold
# Examples of trained models and live demos:
#   https://huggingface.co/prs-eth
# Related projects:
#   https://rollingdepth.github.io/
#   https://marigolddepthcompletion.github.io/
# Citation (BibTeX):
#   https://github.com/prs-eth/Marigold#-citation
# If you find Marigold useful, we kindly ask you to cite our papers.
# --------------------------------------------------------------------------

import logging
import torch



class DepthNormalizerBase:
    """Abstract interface for depth normalization strategies.

    Subclasses map raw (linear) depth into the range [norm_min, norm_max]
    and define how to invert the mapping via ``denormalize``.
    """

    # Whether normalized values keep an absolute (metric) meaning.
    is_absolute = None
    # Whether the far plane is mapped to `norm_max` (True) rather than `norm_min`.
    far_plane_at_max = None

    def __init__(
        self,
        norm_min=-1.0,
        norm_max=1.0,
    ) -> None:
        self.norm_min = norm_min
        self.norm_max = norm_max
        # Marks this base class as abstract: instantiating it always raises.
        # (The assignments above never reach a caller; subclasses set their
        # own attributes without calling super().__init__.)
        raise NotImplementedError

    def __call__(self, depth, valid_mask=None, clip=None):
        """Normalize `depth` into [norm_min, norm_max]. Must be overridden."""
        raise NotImplementedError

    def denormalize(self, depth_norm, **kwargs):
        # For metric depth: convert prediction back to metric depth
        # For relative depth: convert prediction to [0, 1]
        raise NotImplementedError


class ScaleShiftDepthNormalizer(DepthNormalizerBase):
    """Affine (scale-and-shift) depth normalizer.

    Depth is mapped linearly, d' = d * s + t, so that the near plane lands on
    `norm_min` and the far plane on `norm_max`. The planes are robust
    quantiles of the valid depth values rather than the raw min/max.
    """

    is_absolute = False
    far_plane_at_max = True

    def __init__(
        self, norm_min=-1.0, norm_max=1.0, min_max_quantile=0.02, clip=True
    ) -> None:
        self.norm_min = norm_min
        self.norm_max = norm_max
        self.norm_range = norm_max - norm_min
        self.min_quantile = min_max_quantile
        self.max_quantile = 1.0 - min_max_quantile
        self.clip = clip

    def __call__(self, depth_linear, valid_mask=None, clip=None):
        # Per-call clip override falls back to the instance default.
        if clip is None:
            clip = self.clip

        # Only strictly positive depths within the (optional) mask count.
        mask = torch.ones_like(depth_linear).bool() if valid_mask is None else valid_mask
        mask = mask & (depth_linear > 0)

        # Robust near/far planes from quantiles of the valid values.
        near, far = torch.quantile(
            depth_linear[mask],
            torch.tensor([self.min_quantile, self.max_quantile]),
        )

        # Affine map: [near, far] -> [norm_min, norm_max].
        normalized = (depth_linear - near) / (far - near)
        normalized = normalized * self.norm_range + self.norm_min

        if clip:
            normalized = torch.clip(normalized, self.norm_min, self.norm_max)

        return normalized

    def scale_back(self, depth_norm):
        """Map normalized depth from [norm_min, norm_max] back to [0, 1]."""
        return (depth_norm - self.norm_min) / self.norm_range

    def denormalize(self, depth_norm, **kwargs):
        """Best-effort inverse: only recovers relative depth in [0, 1]."""
        logging.warning(f"{self.__class__} is not revertible without GT")
        return self.scale_back(depth_norm=depth_norm)


class ScaleShiftDisparityNormalizer(ScaleShiftDepthNormalizer):
    """Scale-and-shift normalizer operating in disparity (1 / depth) space.

    Depth is first inverted to disparity, then normalized with the parent's
    quantile-based affine mapping. Zero depth values in the checked region
    are rejected because they cannot be inverted.
    """

    def __init__(
        self,
        norm_min=-1.0,
        norm_max=1.0,
        min_max_quantile=0.02,
        clip=True,
    ) -> None:
        super().__init__(
            norm_min=norm_min,
            norm_max=norm_max,
            min_max_quantile=min_max_quantile,
            clip=clip,
        )

    def __call__(self, depth_linear, valid_mask=None, clip=None):
        # Validate before inverting: a zero depth would become inf disparity.
        # With no mask, check the whole map (the original code indexed with
        # `depth_linear[None]`, which only unsqueezes — same elements checked,
        # but by accident). Raise instead of assert so the check survives
        # `python -O`.
        checked = depth_linear if valid_mask is None else depth_linear[valid_mask]
        if (checked == 0).any():
            raise ValueError(
                "Depth map contains zero values in the checked region; "
                "cannot convert to disparity."
            )

        # Convert depth to disparity, then reuse the parent's normalization.
        depth_linear = 1.0 / depth_linear

        return super().__call__(
            depth_linear=depth_linear,
            valid_mask=valid_mask,
            clip=clip,
        )
        

        


def HWC3(x):
    """Coerce a uint8 image to H x W x 3.

    Grayscale (2-D or single-channel) inputs are replicated across three
    channels; RGBA inputs are alpha-composited onto a white background;
    3-channel inputs are returned unchanged.
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)

    if channels == 3:
        return x
    if channels == 1:
        return np.concatenate([x, x, x], axis=2)

    # channels == 4: blend onto white using the alpha channel.
    color = x[:, :, 0:3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = color * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)


def resize_image(input_image, resolution):
    """Resize so the short side is about `resolution` pixels.

    Both output dimensions are snapped to the nearest multiple of 64.
    Upscaling uses Lanczos interpolation; downscaling uses area averaging.
    """
    h, w, _ = input_image.shape
    h = float(h)
    w = float(w)
    scale = float(resolution) / min(h, w)
    h *= scale
    w *= scale
    target_h = int(np.round(h / 64.0)) * 64
    target_w = int(np.round(w / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (target_w, target_h), interpolation=interp)


# normalize
# normalize
def norm_normalize(norm_out):
    """Normalize a (3, H, W) normal map to unit vectors.

    Pixels whose raw magnitude is outside [0.5, 1.5] are treated as
    background: their output is set to -1 and the returned boolean
    foreground mask is False there.
    """
    nx, ny, nz = torch.split(norm_out, 1, dim=0)
    magnitude = torch.sqrt(nx ** 2.0 + ny ** 2.0 + nz ** 2.0) + 1e-10
    unit = torch.cat([nx / magnitude, ny / magnitude, nz / magnitude], dim=0)

    # Hoist the out-of-range test; the original recomputed the repeat 4x.
    background = ((magnitude < 0.5) | (magnitude > 1.5)).repeat(3, 1, 1)

    fg_mask = torch.ones_like(magnitude).repeat(3, 1, 1)
    fg_mask[background] = 0.
    unit[background] = -1
    return unit, fg_mask.bool()


def center_crop(input_image):
    """Crop the largest centered (even-sided) square from *input_image*.

    The side length is 2 * (min(H, W) // 2), so odd dimensions lose one
    row/column. Extra trailing axes (e.g. channels) are preserved.
    """
    h, w = input_image.shape[:2]

    half = min(h, w) // 2
    cy = h // 2
    cx = w // 2

    return input_image[cy - half:cy + half, cx - half:cx + half]


def flip_x(normal):
    """Mirror a normal map across the YZ plane by negating the x component.

    Accepts either a numpy array (returned as float32) or a torch tensor
    (returned as a float tensor); the last axis must hold (x, y, z).
    """
    mirror = [[-1, 0, 0], [0, 1, 0], [0, 0, 1]]
    if isinstance(normal, np.ndarray):
        return normal.dot(np.array(mirror)).astype(np.float32)
    return normal @ torch.tensor(mirror).float()





if __name__ == "__main__":
    # Visual sanity check for the disparity normalizer: for a dataset sample,
    # dump a before/after image pair ("original.png" / "after.png").

    # normalizer = ScaleShiftDepthNormalizer(
    #     norm_min=-1,
    #     norm_max=1,
    #     min_max_quantile=0.02,
    #     clip=True,
    # )



    normalizer = ScaleShiftDisparityNormalizer(
        norm_min=-1,
        norm_max=1,
        min_max_quantile=0.02,
        clip=True,
    )

    print("normalizer", type(normalizer))


    # Project-local dataset loader; not part of this module.
    from dataset import NaiveDepthDataloder

    resolution = 512
    dataset = NaiveDepthDataloder(json_files=['data/cleargrasp_processed/train.jsonl'], resolution=resolution)
    from loguru import logger

    # sampled_idx = np.random.choice(dataset.__len__(), 5)
    sampled_idx = list(range(5))
    for idx in sampled_idx:
        # NOTE(review): assumes tmp_load returns (rgb, depth) numpy arrays
        # with shapes compatible for the axis-0 concatenations below — confirm.
        original_rgb, original_depth = dataset.tmp_load(idx)
        # Min-max normalize the raw depth to [0, 1] for visualization.
        original_depth_normalized = ((original_depth - original_depth.min()) / (original_depth.max() - original_depth.min()))
        original_depth_normalized = torch.from_numpy(original_depth_normalized).float()

        logger.info(f"{original_depth_normalized.max(),original_depth_normalized.min(),original_depth_normalized.mean(),original_depth_normalized.median()}")
        merged = np.concatenate([original_rgb, original_depth_normalized.numpy() * 255 ], axis=0)
        cv2.imwrite("original.png", merged)



        # Disparity-space normalization; output is in [-1, 1] (clipped).
        depth_new_normalized = normalizer(torch.from_numpy(original_depth))

        logger.info(f"{depth_new_normalized.max(),depth_new_normalized.min(),depth_new_normalized.mean(),depth_new_normalized.median()}")

        # Remap [-1, 1] -> [0, 1] for visualization.
        test_depth_new_normalized = (depth_new_normalized + 1) /2 

        logger.info(f"{test_depth_new_normalized.max(),test_depth_new_normalized.min(),test_depth_new_normalized.mean(),test_depth_new_normalized.median()}")



        merged = np.concatenate([original_rgb, test_depth_new_normalized.numpy() * 255 ], axis=0)
        cv2.imwrite("after.png", merged)
        # NOTE(review): only the first sample is ever processed — remove this
        # break to inspect all 5 sampled indices.
        break

        

        