import numpy as np
import cv2
from torchvision.transforms import Compose
import torch


class Resize(object):
    """Resize sample to given size (width, height).

    Resizes ``sample["image"]`` with the configured interpolation method and,
    if ``resize_target`` is set, also resizes ``depth``/``disparity``/``mask``
    targets with nearest-neighbor interpolation. Output dimensions can be
    constrained to a multiple of a given value and may preserve aspect ratio.
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible.  (Output size might be smaller than given size.)
                Defaults to "lower_bound".
            image_interpolation_method (int, optional):
                OpenCV interpolation flag used for the image only; resized
                targets always use cv2.INTER_NEAREST. Defaults to
                cv2.INTER_AREA.
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Round x to the nearest multiple of `ensure_multiple_of`.

        Rounds to the closest multiple first; if that violates `max_val`,
        rounds down instead, and if the result is below `min_val`, rounds up.

        Args:
            x (float): value to constrain.
            min_val (int, optional): lower bound on the result. Defaults to 0.
            max_val (int, optional): upper bound on the result. Defaults to None.

        Returns:
            int: constrained multiple of `ensure_multiple_of`.
        """
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Compute the output (width, height) for an input of the given size.

        Args:
            width (int): input width.
            height (int): input height.

        Returns:
            tuple[int, int]: (new_width, new_height), rounded to multiples of
            `ensure_multiple_of` per `resize_method`.

        Raises:
            ValueError: if `resize_method` is not a recognized mode.
        """
        # Per-axis scale factors to reach the requested size.
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # Keep aspect ratio: pick ONE scale factor for both axes.
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as little as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(f"resize_method {self.__resize_method} not implemented")

        # Rounding to a multiple may push the size past the bound, so re-apply
        # the bound via min_val/max_val.
        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        """Resize the entries of `sample` in place and return it.

        The output size is derived from sample['image'] if present, otherwise
        from sample['depth'].

        Raises:
            ValueError: if targets should be resized but neither 'image' nor
                'depth' is present to derive the output size from.
        """
        width = height = None

        if 'image' in sample:
            width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])

            # resize sample
            sample["image"] = cv2.resize(sample["image"], (width, height), interpolation=self.__image_interpolation_method)
        elif 'depth' in sample:
            width, height = self.get_size(sample["depth"].shape[1], sample["depth"].shape[0])

        if self.__resize_target:
            # BUGFIX: previously, a sample carrying only targets (e.g. just a
            # mask) hit an unbound `width` and raised a confusing NameError.
            if width is None and any(k in sample for k in ("depth", "disparity", "mask")):
                raise ValueError(
                    "Resize requires an 'image' or 'depth' entry to determine the output size."
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)

            if "disparity" in sample:
                sample["disparity"] = cv2.resize(sample["disparity"], (width, height), interpolation=cv2.INTER_NEAREST)

            if "mask" in sample:
                # Mask is cast to float32 because cv2.resize rejects bool arrays.
                sample["mask"] = cv2.resize(sample["mask"].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST)

        return sample


class NormalizeImage(object):
    """Normalize an image with a fixed mean and standard deviation."""

    def __init__(self, mean, std):
        # Applied element-wise (with broadcasting) on every call.
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        """Replace sample['image'] with (image - mean) / std and return the sample."""
        normalized = (sample["image"] - self.__mean) / self.__std
        sample["image"] = normalized
        return sample


class PrepareForNet(object):
    """Prepare sample for usage as network input.

    Casts 'image' (and, when present, 'depth' and 'mask') to contiguous
    float32 arrays.
    """

    def __init__(self):
        pass

    def __call__(self, sample):
        # ascontiguousarray with dtype casts and compacts in a single step.
        sample["image"] = np.ascontiguousarray(sample["image"], dtype=np.float32)

        # Note: 'disparity' is intentionally left untouched here.
        for key in ("depth", "mask"):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key], dtype=np.float32)

        return sample

# Input is a tensor or ndarray; image values are in [0, 255]; disparity and
# mask (single- or 3-channel) are replicated to 3 channels; returns tensors.
def sample2tensor(raw_image, raw_disparity, raw_mask, input_size=518, max_dispairty=20):
    """Convert an image / disparity / mask triple into network-ready tensors.

    Accepts numpy arrays or torch tensors; with a leading batch dimension
    each item is transformed independently and the results stacked.

    Args:
        raw_image: RGB image(s), HxWx3 or BxHxWx3, values in [0, 255].
        raw_disparity: disparity map(s); clipped to [0, max_dispairty] and
            min-max normalized to [0, 1].
        raw_mask: mask (values 0/1, 1 or 3 channels); normalized by its max.
        input_size: target size for Resize (lower bound, multiple of 14).
        max_dispairty: upper clip bound for disparity. (Name keeps its
            original spelling for backward compatibility.)

    Returns:
        dict with 'image', 'disparity' and 'mask' as 3-channel torch tensors
        (stacked along a batch dimension when the input was batched).
    """
    use_batch = int(len(raw_image.shape) == 4)

    transform = Compose([
        Resize(
            width=input_size,
            height=input_size,
            resize_target=True,
            keep_aspect_ratio=True,
            ensure_multiple_of=14,
            resize_method='lower_bound',
            image_interpolation_method=cv2.INTER_CUBIC,
        ),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        PrepareForNet(),
    ])

    # The input image is already RGB, so no BGR->RGB conversion is needed.
    # Normalize image to [0, 1].
    raw_image = raw_image / 255.

    # BUGFIX: guard the normalizations against degenerate inputs, which
    # previously produced NaN/inf via division by zero.
    mask_max = raw_mask.max()
    if mask_max > 0:
        raw_mask = raw_mask / mask_max

    raw_disparity = raw_disparity.clip(0, max_dispairty)
    disparity_min = raw_disparity.min()
    disparity_range = raw_disparity.max() - disparity_min
    if disparity_range > 0:
        raw_disparity = (raw_disparity - disparity_min) / disparity_range
    else:
        # Constant disparity map: normalize to all zeros instead of NaN.
        raw_disparity = raw_disparity - disparity_min

    # The transforms below work on numpy arrays.
    if isinstance(raw_image, torch.Tensor):
        raw_image = raw_image.cpu().numpy()
    if isinstance(raw_disparity, torch.Tensor):
        raw_disparity = raw_disparity.cpu().numpy()
    if isinstance(raw_mask, torch.Tensor):
        raw_mask = raw_mask.cpu().numpy()

    # Ensure a trailing channel axis, then replicate single channels to 3.
    if len(raw_disparity.shape) == 2 + use_batch:
        raw_disparity = np.expand_dims(raw_disparity, axis=-1)
    if len(raw_mask.shape) == 2 + use_batch:
        raw_mask = np.expand_dims(raw_mask, axis=-1)

    if raw_disparity.shape[-1] == 1:
        raw_disparity = np.concatenate([raw_disparity] * 3, axis=-1)
    if raw_mask.shape[-1] == 1:
        raw_mask = np.concatenate([raw_mask] * 3, axis=-1)

    if use_batch:
        # Transform each batch item independently, then stack into tensors.
        samples = []
        for i in range(raw_image.shape[0]):
            sample = transform({'image': raw_image[i], 'disparity': raw_disparity[i], 'mask': raw_mask[i]})
            samples.append(sample)

        samples = {
            'image': torch.stack([torch.from_numpy(s['image']) for s in samples]),
            'disparity': torch.stack([torch.from_numpy(s['disparity']) for s in samples]),
            'mask': torch.stack([torch.from_numpy(s['mask']) for s in samples]),
        }
    else:
        samples = transform({
            'image': raw_image,
            'disparity': raw_disparity,
            'mask': raw_mask,
        })

        samples = {
            'image': torch.from_numpy(samples['image']),
            'disparity': torch.from_numpy(samples['disparity']),
            'mask': torch.from_numpy(samples['mask']),
        }

    # NOTE(review): device placement is intentionally left to the caller.
    return samples

def align_depth_least_square(
    gt_arr: np.ndarray,
    pred_arr: np.ndarray,
    valid_mask_arr: np.ndarray,
    return_scale_shift=True,
    max_resolution=None,
):
    """Affinely align a predicted depth map to ground truth via least squares.

    Solves for scale s and shift t minimizing ||s * pred + t - gt||^2 over the
    valid pixels, then applies (s, t) to the full-resolution prediction.

    Args:
        gt_arr: ground-truth depth, squeezable to [H, W].
        pred_arr: predicted depth, same squeezed shape as gt_arr.
        valid_mask_arr: boolean mask of pixels to use for the fit.
        return_scale_shift: if True, also return (scale, shift); each is a
            length-1 ndarray as produced by the solver.
        max_resolution: if set, the FIT (not the output) runs on a
            nearest-neighbor downsampled copy whose larger side is at most
            this value.

    Returns:
        aligned prediction with pred_arr's original shape, optionally
        followed by scale and shift.

    Raises:
        ValueError: if the valid mask selects no pixels.
    """
    ori_shape = pred_arr.shape  # input shape

    gt = gt_arr.squeeze()  # [H, W]
    pred = pred_arr.squeeze()
    valid_mask = valid_mask_arr.squeeze()

    # Downsample for the fit only, to bound the lstsq problem size.
    if max_resolution is not None:
        scale_factor = np.min(max_resolution / np.array(ori_shape[-2:]))
        if scale_factor < 1:
            downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")

            # BUGFIX: nn.Upsample treats a 3-D input (N, C, W) as 1-D data and
            # would only rescale the width; add batch AND channel dims so both
            # spatial dims of the [H, W] maps are downsampled, then squeeze back.
            def _downsample(arr_2d: torch.Tensor) -> np.ndarray:
                return downscaler(arr_2d.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0).numpy()

            gt = _downsample(torch.as_tensor(gt))
            pred = _downsample(torch.as_tensor(pred))
            valid_mask = _downsample(torch.as_tensor(valid_mask).float()).astype(bool)

    assert (
        gt.shape == pred.shape == valid_mask.shape
    ), f"{gt.shape}, {pred.shape}, {valid_mask.shape}"

    gt_masked = gt[valid_mask].reshape((-1, 1))
    pred_masked = pred[valid_mask].reshape((-1, 1))

    if gt_masked.size == 0:
        # lstsq on an empty system would silently yield a meaningless fit.
        raise ValueError("valid_mask selects no pixels; cannot align depth.")

    # numpy solver: [pred, 1] @ [scale, shift]^T ~= gt
    _ones = np.ones_like(pred_masked)
    A = np.concatenate([pred_masked, _ones], axis=-1)
    X = np.linalg.lstsq(A, gt_masked, rcond=None)[0]
    scale, shift = X

    aligned_pred = pred_arr * scale + shift

    # restore dimensions
    aligned_pred = aligned_pred.reshape(ori_shape)

    if return_scale_shift:
        return aligned_pred, scale, shift
    else:
        return aligned_pred