#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import math
import numpy as np
from typing import NamedTuple

class BasicPointCloud(NamedTuple):
    """Immutable point-cloud container: per-point positions, colors and normals."""
    # np.ndarray is the correct annotation type; np.array is a factory function.
    points: np.ndarray   # per-point xyz positions (assumed (N, 3) — not enforced here)
    colors: np.ndarray   # per-point colors (same leading dimension as points)
    normals: np.ndarray  # per-point normals (same leading dimension as points)

def geom_transform_points(points, transf_matrix):
    """Apply a 4x4 homogeneous transform to a set of 3D points.

    Points are treated as row vectors: each homogeneous point p is mapped to
    p @ transf_matrix, followed by perspective division.

    Args:
        points: (P, 3) tensor of 3D points.
        transf_matrix: (4, 4) transform applied on the right.

    Returns:
        (P, 3) tensor of transformed, dehomogenized points.
    """
    num_pts = points.shape[0]
    pad = torch.ones(num_pts, 1, dtype=points.dtype, device=points.device)
    homogeneous = torch.cat((points, pad), dim=1)
    transformed = torch.matmul(homogeneous, transf_matrix.unsqueeze(0))

    # Small epsilon guards against division by a zero w-component.
    w = transformed[..., 3:] + 0.0000001
    return (transformed[..., :3] / w).squeeze(dim=0)

def getWorld2View(R, t):
    """Assemble a 4x4 world-to-view matrix from rotation R (3x3) and translation t (3,).

    R is transposed before being written into the matrix.

    Returns:
        float32 (4, 4) matrix.
    """
    world2view = np.zeros((4, 4))
    world2view[3, 3] = 1.0
    world2view[:3, :3] = R.transpose()
    world2view[:3, 3] = t
    return np.float32(world2view)

def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
    """World-to-view matrix with optional recentering/rescaling of the camera center.

    Args:
        R: (3, 3) rotation (transposed into the view matrix).
        t: (3,) translation.
        translate: offset added to the camera center in world space.
        scale: multiplier applied to the (shifted) camera center.

    Returns:
        float32 (4, 4) world-to-view matrix.
    """
    view = np.zeros((4, 4))
    view[:3, :3] = R.transpose()
    view[:3, 3] = t
    view[3, 3] = 1.0

    # Move to camera-to-world space, adjust the camera position, invert back.
    cam2world = np.linalg.inv(view)
    cam2world[:3, 3] = (cam2world[:3, 3] + translate) * scale
    return np.float32(np.linalg.inv(cam2world))

def getProjectionMatrix(znear, zfar, fovX, fovY):
    """Build a symmetric perspective projection matrix.

    Args:
        znear, zfar: near/far clip distances.
        fovX, fovY: horizontal/vertical fields of view in radians.

    Returns:
        (4, 4) float tensor; after the perspective divide, depth maps to
        [0, 1] between znear and zfar (positive z looks forward).
    """
    tan_half_x = math.tan(fovX / 2)
    tan_half_y = math.tan(fovY / 2)

    # Frustum extents on the near plane (symmetric about the axis).
    right = tan_half_x * znear
    top = tan_half_y * znear
    left = -right
    bottom = -top

    proj = torch.zeros(4, 4)
    proj[0, 0] = 2.0 * znear / (right - left)
    proj[1, 1] = 2.0 * znear / (top - bottom)
    proj[0, 2] = (right + left) / (right - left)   # 0 for a symmetric frustum
    proj[1, 2] = (top + bottom) / (top - bottom)   # 0 for a symmetric frustum
    proj[2, 2] = zfar / (zfar - znear)
    proj[2, 3] = -(zfar * znear) / (zfar - znear)
    proj[3, 2] = 1.0
    return proj

def fov2focal(fov, pixels):
    """Convert a full field of view (radians) to a focal length in pixels."""
    tan_half = math.tan(fov / 2)
    return pixels / (2 * tan_half)

def focal2fov(focal, pixels):
    """Convert a focal length in pixels to a full field of view (radians)."""
    half_fov = math.atan(pixels / (2 * focal))
    return 2 * half_fov


def ndc_2_cam(ndc_xyz, intrinsic, W, H):
    """Convert normalized ([0,1] x/y) coordinates plus depth to camera space.

    Args:
        ndc_xyz: (..., 3) tensor; x/y in [0, 1], z is the depth.
        intrinsic: batched intrinsics — only intrinsic[0] is used.
        W, H: image width/height in pixels.

    Returns:
        (..., 3) camera-space points.
    """
    z = ndc_xyz[..., 2:3]
    # Rescale normalized x/y to pixel coordinates, weighted by depth.
    pixel_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device)
    xy = ndc_xyz[..., :2] * pixel_scale * z
    homogeneous = torch.cat((xy, z), dim=-1)
    # Row-vector convention: right-multiply by K^-T of the first intrinsic.
    return homogeneous @ torch.inverse(intrinsic[0, ...].t())

def depth2point_cam(sampled_depth, ref_intrinsic):
    """Lift batched depth maps to per-pixel NDC and camera-space points.

    Args:
        sampled_depth: (B, N, C, H, W) depth values.
        ref_intrinsic: intrinsics forwarded unchanged to ndc_2_cam.

    Returns:
        (ndc_xyz, cam_xyz), both shaped (B, N, C, H, W, 3).
    """
    B, N, C, H, W = sampled_depth.shape
    device = sampled_depth.device

    # Normalized pixel coordinates in [0, 1] along each axis.
    xs = torch.arange(W, dtype=torch.float32, device=device) / (W - 1)
    ys = torch.arange(H, dtype=torch.float32, device=device) / (H - 1)
    grid_x, grid_y = torch.meshgrid(xs, ys, indexing='xy')

    # Broadcast the (H, W) grids over the leading batch dimensions.
    grid_x = grid_x[None, None, None, ...].expand(B, N, C, -1, -1)
    grid_y = grid_y[None, None, None, ...].expand(B, N, C, -1, -1)

    ndc_xyz = torch.stack((grid_x, grid_y, sampled_depth), dim=-1).view(B, N, C, H, W, 3)
    cam_xyz = ndc_2_cam(ndc_xyz, ref_intrinsic, W, H)
    return ndc_xyz, cam_xyz

def depth2point_world(depth_image, intrinsic_matrix, extrinsic_matrix):
    """Unproject a depth image to a flat list of CAMERA-space points.

    Args:
        depth_image: (H, W) depth map.
        intrinsic_matrix: (3, 3) camera intrinsics.
        extrinsic_matrix: (4, 4) — currently unused; the world-space step
            below is disabled, so camera-space points are returned.

    Returns:
        (H*W, 3) tensor of camera-space points.
    """
    _, xyz_cam = depth2point_cam(depth_image[None, None, None, ...], intrinsic_matrix[None, ...])
    # NOTE(review): despite the function name, the camera-to-world transform
    # is deliberately left disabled:
    # xyz_world = torch.cat([xyz_cam, torch.ones_like(xyz_cam[...,0:1])], axis=-1) @ torch.inverse(extrinsic_matrix).transpose(0,1)
    # xyz_world = xyz_world[...,:3]
    return xyz_cam.reshape(-1, 3)


def partial_depth2cam_and_world(depth_values, pixel_coords, intrinsic_matrix, extrinsic_matrix=None):
    """Back-project a sparse set of pixels to camera (and optionally world) space.

    Args:
        depth_values: (N,) depths, one per pixel.
        pixel_coords: (N, 2) pixel coordinates.
        intrinsic_matrix: (3, 3) camera intrinsics.
        extrinsic_matrix: optional (4, 4) world-to-camera transform; when
            given, world-space points are also computed.

    Returns:
        (cam_points, world_points): cam_points is (N, 3); world_points is
        (N, 3) when an extrinsic is supplied, else None.
    """
    dev = depth_values.device
    n_pts = depth_values.shape[0]

    # Homogeneous pixel coordinates (u, v, 1).
    homog_px = torch.cat([pixel_coords, torch.ones((n_pts, 1), device=dev)], dim=-1)

    # Per-pixel viewing rays: K^-1 [u, v, 1]^T.
    rays = (torch.inverse(intrinsic_matrix) @ homog_px.T).T
    cam_points = rays * depth_values.unsqueeze(-1)

    if extrinsic_matrix is None:
        return cam_points, None

    homog_cam = torch.cat([cam_points, torch.ones((n_pts, 1), device=dev)], dim=-1)
    world_h = (torch.inverse(extrinsic_matrix) @ homog_cam.T).T
    return cam_points, world_h[:, :3]


def closed_form_inverse_se3(se3, R=None, T=None):
    """
    Compute the inverse of each 4x4 (or 3x4) SE3 matrix in a batch.

    Uses the closed form [R | t]^-1 = [R^T | -R^T t] instead of a generic
    matrix inverse.

    If `R` and `T` are provided, they must correspond to the rotation and translation
    components of `se3`. Otherwise, they will be extracted from `se3`.

    Args:
        se3: Nx4x4 or Nx3x4 array or tensor of SE3 matrices.
        R (optional): Nx3x3 array or tensor of rotation matrices.
        T (optional): Nx3x1 array or tensor of translation vectors.

    Returns:
        Nx4x4 inverted SE3 matrices; NumPy in / NumPy out, torch in / torch out
        (torch output matches the dtype and device of `R`).

    Raises:
        ValueError: if `se3` does not end in a (4, 4) or (3, 4) shape.

    Shapes:
        se3: (N, 4, 4)
        R: (N, 3, 3)
        T: (N, 3, 1)
    """
    # Check if se3 is a numpy array or a torch tensor
    is_numpy = isinstance(se3, np.ndarray)

    # Validate shapes: both full 4x4 and truncated 3x4 inputs are accepted,
    # so the error message must mention both (the original only named (N,4,4)).
    if se3.shape[-2:] != (4, 4) and se3.shape[-2:] != (3, 4):
        raise ValueError(f"se3 must be of shape (N,4,4) or (N,3,4), got {se3.shape}.")

    # Extract R and T if not provided
    if R is None:
        R = se3[:, :3, :3]  # (N,3,3)
    if T is None:
        T = se3[:, :3, 3:]  # (N,3,1)

    if is_numpy:
        R_transposed = np.transpose(R, (0, 2, 1))
        top_right = -np.matmul(R_transposed, T)  # -R^T t
        inverted_matrix = np.tile(np.eye(4), (len(R), 1, 1))
    else:
        R_transposed = R.transpose(1, 2)  # (N,3,3)
        top_right = -torch.bmm(R_transposed, T)  # (N,3,1)
        # Build the identity batch directly with the right dtype/device
        # (avoids the eye -> .to(dtype) -> .to(device) round trip).
        inverted_matrix = torch.eye(4, dtype=R.dtype, device=R.device)[None].repeat(len(R), 1, 1)

    # Fill in the rotation and translation blocks; last row stays (0, 0, 0, 1).
    inverted_matrix[:, :3, :3] = R_transposed
    inverted_matrix[:, :3, 3:] = top_right

    return inverted_matrix


def depth_to_world_points(depth_map, intrinsic_matrix, extrinsic_matrix, eps=1e-8):
    """Unproject a full depth map to world-space points.

    Args:
        depth_map: (H, W) depth values.
        intrinsic_matrix: (3, 3) pinhole intrinsics; skew must be zero.
        extrinsic_matrix: (4, 4) world-to-camera transform.
        eps: pixels with depth <= eps are flagged invalid in the mask.

    Returns:
        (world_points, cam_points, point_mask): two (H*W, 3) float tensors
        and an (H, W) boolean validity mask.
    """
    device = depth_map.device
    valid_mask = depth_map > eps

    H, W = depth_map.shape
    assert intrinsic_matrix.shape == (3, 3), "Intrinsic matrix must be 3x3"
    assert intrinsic_matrix[0, 1] == 0 and intrinsic_matrix[1, 0] == 0, "Intrinsic matrix must have zero skew"

    # Focal lengths and principal point.
    focal_u = intrinsic_matrix[0, 0].item()
    focal_v = intrinsic_matrix[1, 1].item()
    center_u = intrinsic_matrix[0, 2].item()
    center_v = intrinsic_matrix[1, 2].item()

    # Pixel index grids: columns vary along axis 1, rows along axis 0.
    cols = torch.arange(W).view(1, W).expand(H, W).to(device)
    rows = torch.arange(H).view(H, 1).expand(H, W).to(device)

    # Standard pinhole back-projection into camera space, flattened to (H*W, 3).
    cam_points = torch.stack(
        (
            (cols - center_u) * depth_map / focal_u,
            (rows - center_v) * depth_map / focal_v,
            depth_map,
        ),
        dim=-1,
    ).reshape(-1, 3).float()

    # Closed-form inverse of the world-to-camera extrinsic.
    cam2world = closed_form_inverse_se3(extrinsic_matrix[None])[0]
    rot = cam2world[:3, :3]
    trans = cam2world[:3, 3]

    world_points = cam_points @ rot.T + trans  # (H*W, 3)
    return world_points, cam_points, valid_mask


def get_topk_pixels(error_map, depth_map, k=-1):
    """Select the k pixels with the largest error.

    Args:
        error_map: (H, W) per-pixel errors.
        depth_map: (H, W) depths, sampled at the selected pixels.
        k: number of pixels to keep; k < 0 keeps all H*W pixels.

    Returns:
        coords: (k, 2) float tensor of (row, col) indices, ordered by
            descending error.
        depths: (k,) depth values at those pixels.
    """
    H, W = error_map.shape
    if k < 0:
        k = H * W
    _, flat_idx = torch.topk(error_map.view(-1), k, largest=True)

    # Recover 2D indices from the flattened position.
    rows = torch.div(flat_idx, W, rounding_mode='trunc')
    cols = flat_idx % W

    coords = torch.stack([rows, cols], dim=1).float()
    return coords, depth_map[rows, cols]

def depth2points(depth_map, intrinsic_matrix, extrinsic_matrix, scale=10):
    """Back-project a subsampled depth map to world-space points.

    Args:
        depth_map: (H, W) depths.
        intrinsic_matrix: (3, 3) pinhole intrinsics.
        extrinsic_matrix: (4, 4) world-to-camera transform.
        scale: keep every `scale`-th pixel along both axes.

    Returns:
        (M, 3) tensor of world-space points.
    """
    dev = depth_map.device
    H, W = depth_map.shape
    fx = intrinsic_matrix[0, 0].item()
    fy = intrinsic_matrix[1, 1].item()
    cx = intrinsic_matrix[0, 2].item()
    cy = intrinsic_matrix[1, 2].item()

    row_grid, col_grid = torch.meshgrid(
        torch.arange(H, device=dev), torch.arange(W, device=dev), indexing='ij'
    )

    # Pinhole back-projection; subsample the grid before flattening.
    cam_pts = torch.stack(
        ((col_grid - cx) * depth_map / fx, (row_grid - cy) * depth_map / fy, depth_map),
        dim=-1,
    )[::scale, ::scale].reshape(-1, 3)

    # Row-vector convention: p_world = [p_cam, 1] @ (E^-1)^T.
    homog = torch.cat([cam_pts, torch.ones_like(cam_pts[..., 0:1])], axis=-1)
    world = homog @ torch.inverse(extrinsic_matrix).transpose(0, 1)
    return world[..., :3]