import torch
import numpy as np


def cast_rays(ori, dir, z_vals):
    """Sample 3D points along rays: origin + depth * direction.

    Broadcasts `z_vals` against the ray origins/directions, producing one
    point per depth sample per ray.
    """
    origins = ori[..., None, :]
    headings = dir[..., None, :]
    return origins + z_vals[..., None] * headings


def get_ray_directions(W, H, fx, fy, cx, cy, use_pixel_centers=True, use_UE4=False):
    """Compute per-pixel ray directions in the camera frame.

    Args:
        W, H: image width and height in pixels.
        fx, fy: focal lengths in pixels.
        cx, cy: principal point in pixels.
        use_pixel_centers: if True, offset pixel coordinates by 0.5 so rays
            pass through pixel centers.
        use_UE4: if True, return directions remapped to UE4's left-handed
            axis layout (x, y, z) -> (z, x, -y); otherwise use the
            OpenGL-style convention (x right, y up, camera looks down -z).

    Returns:
        (H, W, 3) float32 tensor of (unnormalized) ray directions.
    """
    pixel_center = 0.5 if use_pixel_centers else 0
    i, j = np.meshgrid(
        np.arange(W, dtype=np.float32) + pixel_center,
        np.arange(H, dtype=np.float32) + pixel_center,
        indexing='xy'
    )
    i, j = torch.from_numpy(i), torch.from_numpy(j)

    # Fix: the original computed the default directions unconditionally and
    # then threw them away when use_UE4 was set; compute only the branch used.
    if use_UE4:
        # Standard pinhole directions (x right, y down, z forward), then
        # remap axes (x, y, z) -> (z, x, -y) for UE4's coordinate system.
        dirs = torch.stack([(i - cx) / fx, (j - cy) / fy, torch.ones_like(i)], -1)
        directions = torch.stack([dirs[..., 2], dirs[..., 0], -dirs[..., 1]], -1)
    else:
        directions = torch.stack(
            [(i - cx) / fx, -(j - cy) / fy, -torch.ones_like(i)], -1
        )  # (H, W, 3)
    return directions

def get_rays_UE4(H: int, W: int, K: np.ndarray, c2w: np.ndarray, combine=False):
    """Generate world-space rays for a pinhole camera in UE4's coordinate frame.

    NOTE(review): a second function with this same name (taking an extra
    `shift` argument) is defined later in this file and shadows this one at
    import time.

    Args:
        H: height of image
        W: width of image
        K: intrinsic matrix of camera: [[f,0,cx],[0,f,cy],[0,0,1]]
        c2w: (4, 4) transformation matrix from camera coordinate to world
            coordinate; in our carla dataset this is the "My car pose".
        combine: whether to mix the ray_o and rays_d
    Returns:
        rays_o: (H, W, 3), the origin of the rays in world coordinate
        rays_d: (H, W, 3), the normalized direction of the rays in world coordinate
    """
    px, py = np.meshgrid(np.arange(W, dtype=np.float32),
                         np.arange(H, dtype=np.float32), indexing='xy')
    cam_x = (px - W * 0.5) / K[0, 0]
    cam_y = (py - H * 0.5) / K[1, 1]
    cam_z = np.ones_like(px)
    # The components above are in the "standard" (OpenCV-style) camera frame:
    #
    #        . z                      ^ z
    #       /                         |
    #       +-------> x   to          |
    #      |                          | . x
    #      |                          |/
    #      v y                        +-------> y
    #
    # Remap (x, y, z) -> (z, x, -y) to land in UE4's left-handed frame.
    rays_d = np.stack([cam_z, cam_x, -cam_y], axis=-1)
    # Rotate each direction into world coordinates (R @ d per pixel).
    rays_d = np.einsum('hwj,ij->hwi', rays_d, c2w[:3, :3])
    # Normalize so rays_d holds unit viewing directions.
    rays_d = rays_d / np.linalg.norm(rays_d, axis=-1, keepdims=True)
    rays_o = np.broadcast_to(c2w[:3, 3], rays_d.shape)
    assert rays_o.shape == rays_d.shape == (H, W, 3)
    if not combine:
        return rays_o, rays_d
    rays = np.concatenate([rays_o, rays_d], axis=-1)
    assert rays.shape == (H, W, 6)
    return rays
    

def get_rays(directions, c2w, UE4_coordinate=False, keepdim=False):
    """Rotate camera-frame ray directions into world space and extract origins.

    Args:
        directions: (N_rays, 3) or (H, W, 3) ray directions in the camera frame.
        c2w: camera-to-world pose(s). NOTE(review): despite the "(4, 4)"
            comments below, the `.expand` on the translation column only works
            when the pose is (..., 3, 4) — a square (4, 4) pose would carry a
            4-vector translation; confirm the caller's pose shape.
        UE4_coordinate: accepted for interface compatibility; not used here.
        keepdim: if False, flatten both outputs to (-1, 3).

    Returns:
        rays_o, rays_d: ray origins and (unnormalized) world-space directions.

    Raises:
        ValueError: for dimensionality combinations the original code left
            unhandled (which previously surfaced as a NameError).
    """
    # Rotate ray directions from camera coordinate to the world coordinate
    # rays_d = directions @ c2w[:, :3].T # (H, W, 3) # slow?
    assert directions.shape[-1] == 3

    if directions.ndim == 2:  # (N_rays, 3)
        assert c2w.ndim == 3  # (N_rays, 4, 4) / (1, 4, 4)
        # Broadcast multiply + sum is a per-ray R @ d.
        rays_d = (directions[:, None, :] * c2w[:, :3, :3]).sum(-1)  # (N_rays, 3)
        rays_o = c2w[:, :, 3].expand(rays_d.shape)
    elif directions.ndim == 3:  # (H, W, 3)
        if c2w.ndim == 2:  # (4, 4)
            rays_d = (directions[:, :, None, :] * c2w[None, None, :3, :3]).sum(-1)  # (H, W, 3)
            rays_o = c2w[None, None, :, 3].expand(rays_d.shape)
        elif c2w.ndim == 3:  # (B, 4, 4)
            rays_d = (directions[None, :, :, None, :] * c2w[:, None, None, :3, :3]).sum(-1)  # (B, H, W, 3)
            rays_o = c2w[:, None, None, :, 3].expand(rays_d.shape)
        else:
            # Fix: the original fell through here with rays_d/rays_o unbound.
            raise ValueError(f'unsupported c2w.ndim={c2w.ndim} for 3-D directions')
    else:
        # Fix: the original fell through here with rays_d/rays_o unbound.
        raise ValueError(f'unsupported directions.ndim={directions.ndim}')

    if not keepdim:
        rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
    return rays_o, rays_d
import numpy as np

def get_rotation_matrix_left(roll, pitch, yaw):
    """Build the combined rotation matrix for a left-handed coordinate system.

    Angles are in radians. The result is Rz @ Ry @ Rx, with entries whose
    magnitude is below 1e-6 snapped to exactly zero (cleans up trig noise
    such as cos(pi/2)).
    """
    cos_r, sin_r = np.cos(roll), np.sin(roll)
    cos_p, sin_p = np.cos(pitch), np.sin(pitch)
    cos_y, sin_y = np.cos(yaw), np.sin(yaw)

    # Per-axis rotations (left-handed sign convention).
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, cos_r, sin_r],
                      [0.0, -sin_r, cos_r]])
    rot_y = np.array([[cos_p, 0.0, -sin_p],
                      [0.0, 1.0, 0.0],
                      [sin_p, 0.0, cos_p]])
    rot_z = np.array([[cos_y, sin_y, 0.0],
                      [-sin_y, cos_y, 0.0],
                      [0.0, 0.0, 1.0]])

    rotation = rot_z @ rot_y @ rot_x
    # Snap near-zero entries to exactly zero.
    rotation[np.abs(rotation) < 1e-6] = 0
    return rotation

def get_rays_UE4(H: int, W: int, K: np.ndarray, c2w: np.ndarray, shift=(0.0, 0.0, 2.5), combine=False):
    """Generate world-space rays for a pinhole camera in UE4's coordinate frame.

    Args:
        H: height of image
        W: width of image
        K: intrinsic matrix of camera: [[f,0,cx],[0,f,cy],[0,0,1]]
        c2w: (3, 4) transformation matrix from camera coordinate to world
            coordinate; in our carla dataset this is the "My car pose".
            The input array is NOT modified.
        shift: the translation from car to camera, mainly something like
            (0., 0., 2.5). Accepts any 3-element sequence/array.
        combine: whether to mix the ray_o and rays_d
    Returns:
        rays_o: (H, W, 3), the origin of the rays in world coordinate
        rays_d: (H, W, 3), the normalized direction of the rays in world coordinate
    """
    i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
    dirs = np.stack([(i - W * 0.5) / K[0, 0], (j - H * 0.5) / K[1, 1], np.ones_like(i)], -1)
    # The directions above are in the "standard" (OpenCV-style) camera frame.
    # Now we must change from the "standard" frame to UE4's coordinate system:
    #
    #        . z                      ^ z
    #       /                         |
    #       +-------> x   to          |
    #      |                          | . x
    #      |                          |/
    #      v y                        +-------> y
    #
    # so mainly, (x,y,z) in the "standard" coordinate will become (z,x,-y)
    rays_d = np.stack([dirs[..., 2], dirs[..., 0], -dirs[..., 1]], -1)
    # Fix: the original wrote the shifted translation back into c2w
    # (c2w[:,3] += shift), silently mutating the caller's pose and compounding
    # the shift on repeated calls. Compute it on the side instead. The mutable
    # np.array default argument is also replaced by an immutable tuple.
    translation = c2w[:3, 3] + np.asarray(shift, dtype=float).reshape(3)
    rays_d = np.sum(rays_d[..., np.newaxis, :] * c2w[:3, :3], axis=-1)
    rays_d = rays_d / np.linalg.norm(rays_d, axis=-1, keepdims=True)  # normalize viewdirs
    rays_o = np.broadcast_to(translation, rays_d.shape)
    assert rays_o.shape == rays_d.shape
    assert rays_o.shape == (H, W, 3)
    if combine:
        rays = np.concatenate([rays_o, rays_d], axis=-1)
        assert rays.shape == (H, W, 6)
        return rays
    else:
        return rays_o, rays_d