#%%
from typing import Any, Dict, List, Optional, Tuple

import hydra
import pytorch_lightning as L
import rootutils
from sympy import im
import torch
import signal  # noqa: F401
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import Logger
from omegaconf import DictConfig, OmegaConf


# Locate the project root (via the .project-root marker file) and put it on
# sys.path so the fast3r package imports below resolve.
rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
# ------------------------------------------------------------------------------------ #
from fast3r.dust3r.loss.vggt_loss import normalize_pointcloud
from fast3r.utils import (
    RankedLogger,
    extras,
    get_metric_value,
    instantiate_callbacks,
    instantiate_loggers,
    log_hyperparameters,
    task_wrapper,
)

# Rank-zero-only logger so multi-process runs do not emit duplicate lines.
log = RankedLogger(__name__, rank_zero_only=True)

def python_eval_resolver(code: str):
    """Evaluate a Python expression string and return its value.

    Registered below as an OmegaConf resolver so YAML configs can embed
    small computed expressions.

    WARNING: eval() executes arbitrary code -- only use with trusted configs.
    """
    result = eval(code)
    return result

import os
# Set the environment variable before Hydra initializes so full tracebacks
# are shown instead of Hydra's condensed error wrapper.
os.environ['HYDRA_FULL_ERROR'] = '1'
# Register the resolver with OmegaConf
# usage: ${python_eval:1 + 1} in yaml
OmegaConf.register_new_resolver("python_eval", python_eval_resolver)

# Compose the training config in-process (notebook-friendly alternative to
# the @hydra.main decorator); config_path is relative to this file.
with hydra.initialize(version_base="1.3", config_path="../configs"):
    cfg = hydra.compose(config_name="train.yaml")
    # print(OmegaConf.to_yaml(cfg))
    # print(OmegaConf.to_yaml(cfg))
# %%
datamodule: LightningDataModule = hydra.utils.instantiate(cfg.data.data_module)
train_dataloader = datamodule.train_dataloader()
# test_dataloader = datamodule.test_dataloader()
print(train_dataloader)
# %%
# Pull one batch; each element of `batch` is a per-view dict with keys like
# 'img', 'pts3d', 'valid_mask' (see the cells below).
batch = next(iter(train_dataloader))
# batch = batch[0]
#%%
# Quick sanity check of the image tensor statistics for the first view.
img = batch[0]['img']
print(img.shape)
print(torch.max(img), torch.mean(img))
    #   , img.min(dim=(-3, -2, -1)))
#%%
# verify normalize_pointcloud

# Stack GT points / masks across views along dim=1.
gt_pts = torch.stack([view["pts3d"] for view in batch], dim=1)
mask_gt = torch.stack([view["valid_mask"] for view in batch], dim=1)
print(gt_pts.shape, mask_gt.shape)
gt_pts_normed, norm_factor_pr = normalize_pointcloud(gt_pts, mask_gt)
print(gt_pts_normed.shape, norm_factor_pr.shape, norm_factor_pr)
# Per-point norms, zeroed at invalid pixels.
gt_pts_normedi = torch.norm(gt_pts_normed, dim=-1)*mask_gt
print(gt_pts_normedi.shape)
# Masked mean point norm per batch element; presumably ~1 after
# normalization -- verify against normalize_pointcloud's definition.
gt_pts_mean = gt_pts_normedi.sum(dim=[1,2,3])/mask_gt.sum(dim=[1,2,3])
print(gt_pts_mean)
#%%
# from fast3r.dust3r.utils.geometry import geotrf
# view = batch[0]
# for k in ['img', 'depthmap', 'camera_pose', 'camera_intrinsics', 'pts3d', 'valid_mask']:
#     print(k, view[k].shape)
# print(view['camera_intrinsics'])
# print(view['camera_pose'])

#%%
# Verify that get_local_pts3d_from_depth reproduces the GT camera-frame points.
from fast3r.dust3r.loss.pose_loss import CameraIntrinsicLoss
from fast3r.dust3r.heads.camera import get_local_pts3d_from_depth
# FIX: geotrf was only mentioned in a commented-out import above, so the
# geotrf() call below raised NameError; import it here alongside the others.
from fast3r.dust3r.utils.geometry import geotrf, inv, depthmap_to_camera_coordinates, depthmap_to_absolute_camera_coordinates
import numpy as np

i = 0
view = batch[i]
fuv_scaler = 1
fuv_gt = CameraIntrinsicLoss.intri_to_encoding(view["camera_intrinsics"])/fuv_scaler # (B, 2)
print(fuv_gt.shape)
# Camera-frame points predicted from depth + encoded intrinsics.
pts_3d_camera = get_local_pts3d_from_depth(view['depthmap'], fuv_gt, fuv_scaler)
# Map GT world points into the camera frame via the inverse c2w pose.
inv_matrix_local = inv(view["camera_pose"].float())
gt_pts = geotrf(inv_matrix_local, view["pts3d"])
mask = view['valid_mask']
print(gt_pts.shape)
print(mask.sum(), (~mask).sum())

absolute_difference = torch.abs(pts_3d_camera[mask] - gt_pts[mask])
print(f"Maximum absolute difference between point clouds: {torch.max(absolute_difference):.6e}")
# Cross-check each batch element against the numpy reference implementation.
for j in range(gt_pts.shape[0]):
    mask0 = mask[j]
    X_cam, valid_mask = depthmap_to_camera_coordinates(view['depthmap'][j].numpy(), view["camera_intrinsics"][j].numpy())

    absolute_difference = np.abs(X_cam[mask0] - gt_pts[j][mask0].numpy())
    print(f"Maximum absolute difference between point clouds: {np.max(absolute_difference):.6e}")
    # print((mask0.numpy() != valid_mask).sum())
    # print((mask0.numpy() != valid_mask).sum())
#%%
# Verify world-frame reprojection: camera-frame points mapped through the
# camera-to-world pose should match both GT pts3d and the numpy reference.
pts_3d_world = geotrf(view['camera_pose'],pts_3d_camera)
np_pts_world = []
for j in range(gt_pts.shape[0]):
    X_cam, valid_mask = depthmap_to_absolute_camera_coordinates(view['depthmap'][j].numpy(), view["camera_intrinsics"][j].numpy(), view["camera_pose"][j].numpy())
    np_pts_world.append(X_cam)
np_pts_world = np.stack(np_pts_world, axis=0)
print(np_pts_world.shape)
# Torch reprojection vs GT.
absolute_difference = torch.abs(pts_3d_world[mask] - view["pts3d"][mask])
print(f"Maximum absolute difference between point clouds: {torch.max(absolute_difference):.6e}")
# Torch reprojection vs numpy reference.
absolute_difference = np.abs(pts_3d_world[mask].numpy() - np_pts_world[mask])
print(f"Maximum absolute difference between point clouds: {np.max(absolute_difference):.6e}")
# GT vs numpy reference.
absolute_difference = np.abs(view["pts3d"][mask].numpy() - np_pts_world[mask])
print(f"Maximum absolute difference between point clouds: {np.max(absolute_difference):.6e}")

# print(X_cam.shape, gt_pts[0][mask].numpy().shape)

# %%
# Pairwise view-overlap percentages from the first batch element's per-view
# point clouds, rendered as a styled pandas matrix.
sample = [v['pts3d'][0] for v in batch]
print(sample[0].shape, len(sample))
from scipy.spatial import cKDTree
import pandas as pd
from fast3r.eval.utils import calculate_close_points_percentage, compare_views_by_points, calculate_intra_view_proximity
threshold = 0.1 
# percent_close = calculate_close_points_percentage(sample[0], sample[1], distance_threshold=threshold)
efficient_results = compare_views_by_points(sample, threshold)
df = pd.DataFrame(efficient_results)
df.columns = [f"Tgt View {k}" for k in range(len(sample))]
df.index = [f"Src View {k}" for k in range(len(sample))]

print("\n\n" + "="*50)
print("     Overlap Percentage Matrix (Efficiently Computed)")
print("="*50)
# NOTE: display() is injected by IPython; this cell only works in a notebook.
display(df.style.format("{:.2f}%").background_gradient(cmap='viridis', axis=None))
#%%

# Proximity of points within a single view (view index 2).
threshold = 0.1 
percent_close = calculate_intra_view_proximity(sample[2], distance_threshold=threshold)
print(f"\nFinal Percentage: {percent_close:.6f}% of pairs are closer than {threshold}")

#%%
# 
# print(len(batch), batch[1], batch[2])
print(list(batch[0].keys()))
# Stack per-view intrinsics along dim=1; presumably (B, n_views, 3, 3) -- TODO confirm.
cams = [view['camera_intrinsics'] for view in batch]
cams = torch.stack(cams, dim=1)

#%%

# Variance of the flattened intrinsics across views for batch element 0.
print(cams[0].shape)
print(cams[0])
v = torch.var(cams[0].view(cams.shape[1], -1), dim=0)
print(v.shape)
print(v)

#%%
print(view['camera_pose'])
print(view['depthmap'].max())
#%%
# Re-derive camera-frame points from depth + encoded intrinsics and compare
# against the stored GT pts3d (same check as above, without fuv scaling).
from fast3r.dust3r.loss.pose_loss import CameraIntrinsicLoss
from fast3r.dust3r.heads.camera import get_local_pts3d_from_depth
fuv = CameraIntrinsicLoss.intri_to_encoding(view['camera_intrinsics'])
print(fuv.shape)
#%%
pts_3d_camera = get_local_pts3d_from_depth(view['depthmap'], fuv, fuv_scaler=1.0)
print(pts_3d_camera.shape)
#%%
# Camera frame -> world frame via the c2w pose.
pts_3d_world = geotrf(view['camera_pose'], pts_3d_camera)
print(pts_3d_world.shape)
# %%
are_they_close = torch.allclose(view['pts3d'], pts_3d_world, atol=1e-5)
print(are_they_close)
absolute_difference = torch.abs(view['pts3d'] - pts_3d_world)
print(f"{torch.max(absolute_difference):.6e}", torch.mean(absolute_difference))

# %%
import numpy as np
def unproject_to_world(depth_map, K, pose_c2w):
    """
    Unproject a depth map to 3D points in world and camera coordinates.

    Args:
        depth_map (np.ndarray): (H, W) array of depth values.
        K (np.ndarray): 3x3 camera intrinsic matrix.
        pose_c2w (np.ndarray): 4x4 camera-to-world pose matrix.

    Returns:
        tuple:
            - world_coords (np.ndarray): (H, W, 3) points in the world frame.
            - camera_coords (np.ndarray): (H, W, 3) points in the camera frame.
    """
    height, width = depth_map.shape
    K_inv = np.linalg.inv(K)

    # Pixel grid: v indexes rows (y), u indexes columns (x).
    v, u = np.indices((height, width))

    # Homogeneous pixel coordinates (u, v, 1), shape (H, W, 3).
    pixel_coords = np.stack((u, v, np.ones_like(u)), axis=-1).astype(np.float32)

    # Apply K^-1 to every pixel vector -> normalized camera rays (x/z, y/z, 1).
    # 'ij,hwj->hwi': for each pixel (h, w), multiply K_inv by the pixel vector.
    camera_normalized_coords = np.einsum('ij,hwj->hwi', K_inv, pixel_coords)

    # Scale each ray by its depth to get camera-frame 3D points, (H, W, 3).
    camera_coords = camera_normalized_coords * depth_map[..., np.newaxis]

    # Homogeneous (x, y, z, 1), shape (H, W, 4), so the 4x4 pose applies directly.
    camera_coords_homo = np.concatenate(
        (camera_coords, np.ones_like(depth_map[..., np.newaxis])), axis=-1
    )

    # Transform camera-frame points to world frame with the c2w pose.
    world_coords_homo = np.einsum('ij,hwj->hwi', pose_c2w, camera_coords_homo)

    # De-homogenize. For a rigid pose w stays 1, but divide for generality.
    w = world_coords_homo[..., 3:]
    world_coords = world_coords_homo[..., :3] / w

    return world_coords, camera_coords
# %%
# Per-batch-element numpy unprojection; each call returns a (world, camera)
# tuple which is split into the two lists below.
pts3d_from_np = [unproject_to_world(view['depthmap'].numpy()[i], view['camera_intrinsics'].numpy()[i], view['camera_pose'].numpy()[i]) for i in range(view['depthmap'].shape[0])]
pts3d_local_from_np = [p[1] for p in pts3d_from_np]
pts3d_from_np = [p[0] for p in pts3d_from_np]
pts3d_from_np = np.stack(pts3d_from_np, axis=0)
pts3d_local_from_np = np.stack(pts3d_local_from_np, axis=0)
print(pts3d_from_np.shape)
#%%
# Camera-frame points: torch implementation vs numpy reference.
absolute_difference = np.abs(pts_3d_camera.numpy() - pts3d_local_from_np)

# Use np.allclose for a robust floating-point comparison
are_they_close = np.allclose(pts_3d_camera.numpy(), pts3d_local_from_np)

print(f"Are the two point clouds numerically close? -> {are_they_close}")
print(f"Maximum absolute difference between local point clouds: {np.max(absolute_difference):.6e}")
# %%
# World-frame points: GT pts3d vs numpy reprojection.
absolute_difference = np.abs(view['pts3d'].numpy() - pts3d_from_np)

# Use np.allclose for a robust floating-point comparison
are_they_close = np.allclose(view['pts3d'].numpy(), pts3d_from_np)

print(f"Are the two point clouds numerically close? -> {are_they_close}")
print(f"Maximum absolute difference between point clouds: {np.max(absolute_difference):.6e}")
# %%
# Index the dataset directly with a tuple key; exact key semantics depend on
# the dataset class -- presumably (scene_idx, resolution_idx, n_views) or
# similar; TODO confirm against the dataset's __getitem__.
ds = train_dataloader.dataset
print(ds)
views = ds[(8, 1, 5)]
# %%
# Raw (un-collated) numpy arrays for the first returned view.
view_ds = views[0]
K = view_ds["camera_intrinsics"].astype(np.float32)
pts3d = view_ds["pts3d"].astype(np.float32)
depthmap = view_ds["depthmap"].astype(np.float32)
pose = view_ds["camera_pose"].astype(np.float32)
img = view_ds["img"]
H, W = view_ds["true_shape"]

# %%
print(img.shape)
print(depthmap.shape)
print(pts3d.shape)
print(K)
print(pose)
# %%
# Reproject the raw view's depth map and compare against its stored GT pts3d.
# FIX: unproject_to_world returns a (world_coords, camera_coords) tuple; the
# previous code diffed the whole tuple against pts3d (broadcasting to
# (2, H, W, 3)), mixing world- and camera-frame errors. Only the world
# points are comparable to pts3d.
pts3d_from_original, _ = unproject_to_world(depthmap, K, pose)
absolute_diff_reproj = np.abs(pts3d_from_original - pts3d)

# FIX: label matched np.max, not a mean, so say "Max".
print(f"Max error (original reprojection vs groundtruth pts3d): {np.max(absolute_diff_reproj):.6e} meters")
# %%
