import argparse
import os

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch

from utils import read_d, read_calib


# Human-readable descriptions for the calibration parameters returned by
# read_calib(). Keys must match the parameter names stored in the
# calib_00-02.xml file; values are display strings only.
# Fixed typos in the printed descriptions: "Distorsion" -> "Distortion",
# "Lenght" -> "Length", and the garbled rectR entry.
calib_descriptions = {
    "mtxL": "Camera Matrix Left Pre-Rectification",
    "distL": "Distortion Left Pre-Rectification",
    "rectL": "Rectification Transformation for Left Camera",
    "proj_matL": "Camera Matrix Post-Rectification (Use this)",
    "wL": "Left Image Width",
    "hL": "Left Image Height",
    "mtxR": "Camera Matrix Right Pre-Rectification",
    "distR": "Distortion Right Pre-Rectification",
    "rectR": "Rectification Transformation for Right Camera",
    "proj_matR": "Camera Matrix Right Post-Rectification (Use this)",
    "wR": "Right Image Width",
    "hR": "Right Image Height",
    "f_disp": "Focal Length (Post Rectification)",
    "cx": "CX (Post Rectification)",
    "cy": "CY (Post Rectification)",
    "b_disp": "Baseline Left-Right in meters",
}




def colorize_depth_maps(depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None):
    """Colorize depth maps with a matplotlib colormap.

    Args:
        depth_map: ``np.ndarray`` or ``torch.Tensor`` of shape [H, W] or
            [B, H, W] (singleton dimensions are squeezed away first).
        min_depth: depth value mapped to 0 before colorizing.
        max_depth: depth value mapped to 1 before colorizing.
        cmap: name of a matplotlib colormap (default "Spectral").
        valid_mask: optional boolean mask of shape [H, W] or [B, H, W];
            positions that are False are painted black.

    Returns:
        Colored image of shape [B, 3, H, W] with float values in [0, 1],
        as a numpy array or torch tensor matching the input type.

    Raises:
        TypeError: if ``depth_map`` is neither a numpy array nor a torch
            tensor (the previous version hit an UnboundLocalError instead).
    """
    assert len(depth_map.shape) >= 2, "Invalid dimension"

    if isinstance(depth_map, torch.Tensor):
        depth = depth_map.detach().clone().squeeze().numpy()
    elif isinstance(depth_map, np.ndarray):
        depth = depth_map.copy().squeeze()
    else:
        raise TypeError("depth_map must be a numpy array or a torch tensor")

    # reshape to [ (B,) H, W ]
    if depth.ndim < 3:
        depth = depth[np.newaxis, :, :]

    # Normalize into [0, 1], apply the colormap, and drop the alpha channel.
    cm = matplotlib.colormaps[cmap]
    depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
    img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3]  # values in [0, 1]
    # [B, H, W, 3] -> [B, 3, H, W]
    img_colored_np = np.rollaxis(img_colored_np, 3, 1)

    if valid_mask is not None:
        if isinstance(depth_map, torch.Tensor):
            valid_mask = valid_mask.detach().numpy()
        valid_mask = valid_mask.squeeze()  # [H, W] or [B, H, W]
        if valid_mask.ndim < 3:
            valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
        else:
            valid_mask = valid_mask[:, np.newaxis, :, :]
        # Broadcast the mask over the 3 color channels and black out
        # invalid pixels.
        valid_mask = np.repeat(valid_mask, 3, axis=1)
        img_colored_np[~valid_mask] = 0

    if isinstance(depth_map, torch.Tensor):
        return torch.from_numpy(img_colored_np).float()
    return img_colored_np


    
def chw2hwc(chw):
    """Convert an image from channel-first [C, H, W] to channel-last [H, W, C].

    Accepts a torch tensor or a numpy array and returns the same type.

    Raises:
        TypeError: for any other input type (the previous version fell
            through and raised an UnboundLocalError instead).
    """
    assert 3 == len(chw.shape)
    if isinstance(chw, torch.Tensor):
        return torch.permute(chw, (1, 2, 0))
    if isinstance(chw, np.ndarray):
        return np.moveaxis(chw, 0, -1)
    raise TypeError("chw must be a numpy array or a torch tensor")

def depth2colordepth(gt_depth):
    """Render a depth map as an 8-bit HWC color image (Spectral colormap)."""
    # Min-max normalize the depths to [0, 1] before colorizing.
    span = gt_depth.max() - gt_depth.min()
    normalized = (gt_depth - gt_depth.min()) / span
    colored = colorize_depth_maps(normalized, 0, 1, cmap="Spectral").squeeze()
    # Scale to 8-bit and reorder CHW -> HWC for image viewers.
    colored = (colored * 255).astype(np.uint8)
    return chw2hwc(colored)


def visualize_sample(scene_path, basepath="im0"):
    """Load one Booster scene, print its calibration, and save a view grid.

    Reads the rectified left/right images, both disparity maps, the
    occlusion masks and the left category mask from ``scene_path``, prints
    every calibration parameter, then writes a 2x4 matplotlib grid to
    'visualization.png' in the current working directory.
    """
    # File layout of a Booster scene:
    #   camera_00 / camera_02        -> rectified left / right images
    #   disp_00.npy / disp_02.npy    -> disparities aligned with left / right
    #   calib_00-02.xml              -> calibration file
    #   mask_00.png / mask_02.png    -> occlusion masks (1: visible, 0: occluded/invalid)
    #   mask_cat.png                 -> left-only category map (255: transparent/mirror)
    scene = lambda *parts: os.path.join(scene_path, *parts)

    calib_params = read_calib(scene("calib_00-02.xml"))
    print("Calibration Parameters:")
    for name in calib_params:
        print("{}\t{}\n{}".format(name, calib_descriptions[name], calib_params[name]))

    left_img = plt.imread(scene("camera_00", basepath + ".png"))
    right_img = plt.imread(scene("camera_02", basepath + ".png"))
    disp_left = read_d(scene("disp_00.npy"))
    disp_right = read_d(scene("disp_02.npy"))
    occ_left = cv2.imread(scene("mask_00.png"))
    occ_right = cv2.imread(scene("mask_02.png"))
    cat_left = cv2.imread(scene("mask_cat.png"))

    # (subplot position, image, title, colormap) for each panel of the grid.
    panels = [
        (1, left_img, "Left", None),
        (2, disp_left, "Disparity Left", "jet"),
        (3, occ_left, "Occlusion Mask Left", "gray"),
        (4, cat_left, "Category Mask Left", "gray"),
        (5, right_img, "Right", None),
        (6, disp_right, "Disparity Right", None),
        (7, occ_right, "Occlusion Mask Right", None),
    ]
    for position, image, title, colormap in panels:
        plt.subplot(2, 4, position)
        if colormap is None:
            plt.imshow(image)
        else:
            plt.imshow(image, cmap=colormap)
        plt.title(title)

    plt.savefig('visualization.png', bbox_inches='tight', dpi=300)

if __name__ == "__main__":
    # CLI entry point: visualize a single Booster scene from disk.
    arg_parser = argparse.ArgumentParser(description='Argument parser')
    arg_parser.add_argument('--scene_path', default="train/Bathroom", type=str, help='path to a Booster scene')
    cli_args = arg_parser.parse_args()
    visualize_sample(cli_args.scene_path)