#%%
import hydra
import sys
import os
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import LightningDataModule, LightningModule, Trainer

sys.path.insert(0, '/lc/code/3D/a3R')

def python_eval_resolver(code: str):
    """Evaluate a Python expression embedded in an OmegaConf config value.

    Args:
        code: A Python expression string (e.g. "2 ** 10").

    Returns:
        Whatever the expression evaluates to.

    SECURITY: eval() executes arbitrary code. This is only acceptable because
    the configs here are trusted local files — never feed untrusted input
    through this resolver.
    """
    return eval(code)


# Register the resolver so YAML configs can use ${python_eval:<expr>} interpolations.
OmegaConf.register_new_resolver("python_eval", python_eval_resolver)

# Hydra compose API (notebook-friendly alternative to the @hydra.main decorator).
version_base="1.3"
config_path="../configs"
config_name="eval.yaml"

# Fix: use the version_base variable defined above instead of re-hardcoding "1.3",
# so the two can't silently drift apart.
with hydra.initialize(version_base=version_base, config_path=config_path):
    cfg = hydra.compose(
        config_name=config_name
    )
print(f"Type of cfg: {type(cfg)}")

print(f"Is cfg an instance of DictConfig? {isinstance(cfg, DictConfig)}")
# NOTE(review): OmegaConf is a utility class, not a base class of DictConfig,
# so this is expected to print False — confirm that is the intent.
print(f"Is cfg an instance of OmegaConf? {isinstance(cfg, OmegaConf)}")
# %%
print(cfg.data.data_module._target_)
print(sys.path)
# Instantiate the LightningDataModule named by the config's _target_ field.
datamodule: LightningDataModule = hydra.utils.instantiate(cfg.data.data_module)
# %%
test_loader = datamodule.test_dataloader()
# print(len(test_loader))
# %%
import torch
import torchvision
import matplotlib.pyplot as plt
import numpy as np
# %%
# Pull one batch; each element of `batches` appears to be a per-view dict
# (keys inspected below). The other two returned values are ignored here.
batches, _, _ = next(iter(test_loader))

print(len(batches))
print(list(batches[0].keys()))
print(batches[0]['img'].shape)
#%%
# First image of the first view; print value stats as a quick sanity check.
img = batches[0]['img'][0]
print(img.max(), img.mean())
#%%
# Take the first sample's pose/intrinsics from every view.
camera_pose = [b['camera_pose'][0] for b in batches]
camera_intrinsics = [b['camera_intrinsics'][0] for b in batches]
print(camera_pose[0].shape)
print(camera_intrinsics[0].shape)
#%%
# Peek at just the first pose.
for pose in camera_pose:
    print(pose)
    break
#%%
from fast3r.dust3r.utils.geometry import inv
# Invert the first camera's pose so all poses can be re-expressed relative
# to camera 1 (camera-1 becomes the identity frame).
in_camera1 = inv(camera_pose[0]) 
print(camera_pose[0])
print(in_camera1)
#%%
# Left-multiply every pose by inv(pose_0): pose list is now relative to camera 1.
camera_pose = [in_camera1 @ p for p in camera_pose]
for pose in camera_pose:
    print(pose)
    # break
#%%
# Dump the structure of one view dict: list entries, 1-D values, and shapes.
for k, v in batches[0].items():
    if isinstance(v, list):
        print(k,'list', v[0])
    elif len(v.shape) == 1:
        print(k, v)
    else:
        print(k, v.shape)
#%%
def world_to_camera_coordinates_batch(world_points_batch, c2w_poses_batch):
    """
    Transforms a batch of 3D world points (e.g., from depth maps) to 3D camera coordinates.

    Args:
        world_points_batch (np.ndarray): A NumPy array of shape (B, H, W, 3)
                                         representing 3D world points for each pixel.
        c2w_poses_batch (np.ndarray): A NumPy array of shape (B, 4, 4)
                                      representing camera-to-world homogeneous
                                      transformation matrices for each batch item.

    Returns:
        np.ndarray: A NumPy array of shape (B, H, W, 3) representing
                    the 3D points in camera coordinates (Xc, Yc, Zc).
    """
    # Invert each camera-to-world pose to get world-to-camera; np.linalg.inv
    # batches over the leading dimension. Shape: (B, 4, 4).
    w2c_poses = np.linalg.inv(c2w_poses_batch)

    # Append a homogeneous 1 to every point: (B, H, W, 3) -> (B, H, W, 4).
    ones = np.ones(world_points_batch.shape[:-1] + (1,))
    points_h = np.concatenate([world_points_batch, ones], axis=-1)

    # Apply each batch's 4x4 matrix to all of its HxW points in one einsum:
    #   out[b, h, w, i] = sum_j w2c[b, i, j] * points_h[b, h, w, j]
    # Result shape: (B, H, W, 4).
    camera_points_h = np.einsum('bij,bhwj->bhwi', w2c_poses, points_h)

    # Drop the homogeneous coordinate: (B, H, W, 4) -> (B, H, W, 3).
    return camera_points_h[..., :3]
#%%
# sky_mask reset single_view depth_only camera_only img_mask depthmap valid_mask 
# Gather the full per-view point maps, depth maps, and poses (all samples).
pts3d = [b['pts3d'] for b in batches]
depthmap = [b['depthmap'] for b in batches]
print(pts3d[0].shape, depthmap[0].shape)
camera_pose = [b['camera_pose'] for b in batches]
print(camera_pose[0].shape)
#%%
# Sanity check: the z of world points transformed into their own camera frame
# should match the stored depthmap (up to floating-point error).
camera_points = world_to_camera_coordinates_batch(pts3d[0], camera_pose[0])
camera_pointsz = camera_points[..., -1]
print(camera_points.shape, camera_pointsz.shape, depthmap[0].shape)
#%%
# Exact-equality counts and the largest signed discrepancy.
print((camera_pointsz==depthmap[0]).sum(), (camera_pointsz!=depthmap[0]).sum())
print(((camera_pointsz-depthmap[0])).max())
#%%
# Residuals only where the recovered depth is positive.
print(camera_pointsz[camera_pointsz>0]- depthmap[0][camera_pointsz>0])
#%%
# Count valid pixels per view.
for b in batches:
    print(b['valid_mask'].sum())
#%%
# Compare invalid-pixel count against zero-depth count for one view.
i=0
print((batches[i]['valid_mask']==0).sum())
print((batches[i]['depthmap']==0).sum())
#%%

#%%
# for pose in batches[0]['camera_pose']:
#     print(pose)
#%%
# First image and label of each view, used by the plotting helpers below.
images = [b['img'][0] for b in batches]
labels = [b['label'][0] for b in batches]
print(images[0].shape)
print(labels[0])
# %%
def show_batch_individual(images_list, labels_list, num_images_to_show=6, title="Batch Images"):
    """Display up to `num_images_to_show` images, each in its own subplot.

    Args:
        images_list: list of image tensors shaped (C, H, W), C being 3 (RGB)
            or 1 (grayscale). Tensors may live on any device.
        labels_list: list of per-image labels shown as subplot titles.
        num_images_to_show: cap on how many images to draw.
        title: figure-level suptitle.
    """
    fig = plt.figure(figsize=(15, 8))
    fig.suptitle(title, fontsize=16)  # attach explicitly to this figure

    actual_num_images = min(num_images_to_show, len(images_list))
    cols = 3  # images per row in the subplot grid
    rows = int(np.ceil(actual_num_images / cols))

    for i in range(actual_num_images):
        ax = fig.add_subplot(rows, cols, i + 1)
        img_tensor = images_list[i].cpu().numpy()

        cmap = None
        if img_tensor.shape[0] == 3:  # color: (C, H, W) -> (H, W, C) for Matplotlib
            img_display = np.transpose(img_tensor, (1, 2, 0))
        elif img_tensor.shape[0] == 1:  # grayscale: drop the channel dimension
            img_display = img_tensor.squeeze(0)
            # Fix: without an explicit cmap, Matplotlib renders 2-D arrays with
            # the default (viridis) colormap instead of grayscale.
            cmap = 'gray'
        else:
            print(f"Warning: Unexpected image shape {img_tensor.shape}. Displaying as is.")
            img_display = img_tensor

        # Optional: unnormalize here if images lie outside [0, 1],
        # e.g. for [-1, 1] normalization: img_display = (img_display + 1) / 2

        ax.imshow(img_display, cmap=cmap)
        ax.set_title(f"{labels_list[i]}", fontsize=8)  # string label as title
        ax.axis('off')
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])  # leave room for the suptitle
    plt.show()
# %%
def show_batch_grid(images_list, title="Batch Image Grid", nrow=None):
    """Render a list of image tensors as one torchvision image grid.

    Args:
        images_list: list of (C, H, W) tensors of identical shape.
        title: plot title.
        nrow: images per grid row; defaults to min(3, batch size).
    """
    # make_grid expects a single (N, C, H, W) batch tensor.
    images_tensor = torch.stack(images_list)

    if nrow is None:
        nrow = min(3, images_tensor.shape[0])

    # normalize=True rescales pixel values into [0, 1] for display; set it to
    # False if the images are already in a displayable range.
    grid_img = torchvision.utils.make_grid(images_tensor, nrow=nrow, padding=2, normalize=True)

    # Move to CPU / NumPy and put channels last for Matplotlib.
    np_grid_img = grid_img.cpu().numpy()
    cmap = None
    if np_grid_img.shape[0] == 3:  # color: (C, H, W) -> (H, W, C)
        np_grid_img = np.transpose(np_grid_img, (1, 2, 0))
    elif np_grid_img.shape[0] == 1:  # grayscale: drop channel dim
        np_grid_img = np_grid_img.squeeze(0)
        # Fix: render single-channel grids in grayscale, not the default viridis.
        cmap = 'gray'
    else:
        print(f"Warning: Unexpected grid image shape {np_grid_img.shape}. Displaying as is.")

    plt.figure(figsize=(10, 10))
    plt.imshow(np_grid_img, cmap=cmap)
    plt.title(title)
    plt.axis('off')
    plt.show()

# Render all first-sample images as a single labeled-free grid.
show_batch_grid(images, title="Image Grid from Batch (No Labels Displayed on Grid)")
# %%

# show_batch_individual(images, labels, num_images_to_show=len(images), title="Individual Images with Filenames from Batch")