import torch
import math
import numpy as np
from typing import NamedTuple

class BasicPointCloud(NamedTuple):
    # Minimal point-cloud container. NOTE(review): presumably each field is an
    # (N, 3) array (xyz / RGB / normal per point) — confirm against callers.
    points : np.ndarray   # per-point positions
    colors : np.ndarray   # per-point colors
    normals : np.ndarray  # per-point normals

def geom_transform_points(points, transf_matrix):
    """Apply a 4x4 homogeneous transform to a batch of 3D points.

    Args:
        points: (P, 3) tensor.
        transf_matrix: (4, 4) transform applied on the right.

    Returns:
        (P, 3) tensor of transformed points after perspective division.
    """
    num_points, _ = points.shape
    pad = torch.ones(num_points, 1, dtype=points.dtype, device=points.device)
    homogeneous_pts = torch.cat((points, pad), dim=1)
    transformed = torch.matmul(homogeneous_pts, transf_matrix.unsqueeze(0))

    # Small epsilon guards against division by a zero w-component.
    w = transformed[..., 3:] + 0.0000001
    return (transformed[..., :3] / w).squeeze(dim=0)

def getWorld2View(R, t):
    """Build a 4x4 world-to-view matrix from rotation R (3x3) and translation t (3,).

    The rotation is transposed into the view matrix; returned as float32.
    """
    view = np.zeros((4, 4))
    view[3, 3] = 1.0
    view[:3, :3] = R.T
    view[:3, 3] = t
    return np.float32(view)

def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
    """World-to-view matrix with an optional recentering/rescaling of the camera.

    Args:
        R: (3, 3) rotation, t: (3,) translation.
        translate: world-space shift added to the camera center.
        scale: multiplier applied to the (shifted) camera center.

    Returns:
        float32 (4, 4) world-to-view matrix.
    """
    w2c = np.zeros((4, 4))
    w2c[:3, :3] = R.T
    w2c[:3, 3] = t
    w2c[3, 3] = 1.0

    # Adjust the camera center in camera-to-world space, then invert back.
    c2w = np.linalg.inv(w2c)
    c2w[:3, 3] = (c2w[:3, 3] + translate) * scale
    return np.float32(np.linalg.inv(c2w))

def getProjectionMatrix(znear, zfar, fovX, fovY):
    """Perspective projection matrix for a symmetric frustum.

    After perspective division, depth maps to [0, 1] over [znear, zfar].

    Args:
        znear, zfar: near/far clip distances.
        fovX, fovY: horizontal/vertical fields of view in radians.

    Returns:
        (4, 4) float torch tensor.
    """
    half_x = math.tan(fovX / 2)
    half_y = math.tan(fovY / 2)

    # Frustum extents on the near plane, symmetric about the optical axis.
    right = half_x * znear
    top = half_y * znear
    left = -right
    bottom = -top

    z_sign = 1.0

    P = torch.zeros(4, 4)
    P[0, 0] = 2.0 * znear / (right - left)
    P[0, 2] = (right + left) / (right - left)
    P[1, 1] = 2.0 * znear / (top - bottom)
    P[1, 2] = (top + bottom) / (top - bottom)
    P[2, 2] = z_sign * zfar / (zfar - znear)
    P[2, 3] = -(zfar * znear) / (zfar - znear)
    P[3, 2] = z_sign
    return P

def fov2focal(fov, pixels):
    """Convert a field of view (radians) to a focal length in pixels."""
    half_extent = math.tan(fov / 2)
    return pixels / (2 * half_extent)

def focal2fov(focal, pixels):
    """Convert a focal length in pixels to a field of view in radians."""
    half_fov = math.atan(pixels / (2 * focal))
    return 2 * half_fov

def generate_sun_map(H, W, u, v, gamma, device=None):
    """Render a (1, H, W) Gaussian "sun" blob centered at pixel (u, v).

    Args:
        H, W: image height and width in pixels.
        u, v: blob center along the row (H) and column (W) axes.
        gamma: Gaussian standard deviation in pixels.
        device: optional torch device for the output.

    Returns:
        (1, H, W) tensor with values in (0, 1]; the peak value 1 lies at (u, v).
    """
    # Build the pixel grid directly on the target device rather than
    # allocating on CPU and copying afterwards.
    rows = torch.arange(H, device=device)
    cols = torch.arange(W, device=device)
    xx, yy = torch.meshgrid(rows, cols, indexing='ij')

    dist_squared = (xx - u)**2 + (yy - v)**2

    # Unnormalized isotropic Gaussian falloff.
    sun_map = torch.exp(-dist_squared / (2 * gamma**2))

    return sun_map[None]


def get_uniform_points_on_sphere_fibonacci(num_points, *, dtype=None, xnp=torch):
    """Near-uniform points on the unit sphere via the golden-angle lattice.

    Reference: https://arxiv.org/pdf/0912.4540.pdf

    Args:
        num_points: number of points to generate.
        dtype: element dtype (defaults to xnp.float32).
        xnp: array backend, torch or numpy.

    Returns:
        (num_points, 3) array of unit vectors.
    """
    dtype = xnp.float32 if dtype is None else dtype
    golden_angle = math.pi * (3. - math.sqrt(5.))
    half = (num_points - 1) / 2
    idx = xnp.linspace(-half, half, num_points, dtype=dtype)
    latitude = xnp.arcsin(2.0 * idx / (2 * half + 1))
    longitude = golden_angle * idx

    # Spherical -> Cartesian on the unit sphere.
    cos_lat = xnp.cos(latitude)
    x = xnp.cos(longitude) * cos_lat
    y = xnp.sin(longitude) * cos_lat
    z = xnp.sin(latitude)
    return xnp.stack([x, y, z], -1)

def camera_project(cameras, xyz):
    """Project world-space points to pixel coordinates for pinhole cameras.

    Args:
        cameras: object exposing world_view_transform (..., 4, 4) plus
            intrinsics fx, fy, cx, cy.
        xyz: (..., 3) world-space points.

    Returns:
        (..., 2) pixel coordinates; points at or behind the camera plane
        map to the principal point (cx, cy).
    """
    assert xyz.shape[-1] == 3
    device = xyz.device
    eps = torch.finfo(xyz.dtype).eps  # type: ignore

    # World -> camera: subtract the camera origin, then rotate.
    t = cameras.world_view_transform[..., :3, 3].to(device)
    R = cameras.world_view_transform[..., :3, :3].to(device)
    cam_pts = xyz - t
    cam_pts = (R * cam_pts[..., :, None]).sum(-2)

    # Perspective division, guarded against non-positive depths.
    depth = cam_pts[..., 2:]
    zeros = torch.zeros_like(cam_pts[..., :2])
    uv = torch.where(depth > eps, cam_pts[..., :2] / depth, zeros)

    # We assume pinhole camera model in 3DGS anyway, so no distortion step:
    # uv = _distort(cameras.camera_models, cameras.distortion_parameters, uv, xnp=xnp)

    u, v = torch.unbind(uv, dim=-1)

    # Camera plane -> image pixels via the intrinsics.
    px = cameras.fx * u + cameras.cx
    py = cameras.fy * v + cameras.cy
    return torch.stack((px, py), -1)


def light_trajectory_in_hemisphere(N, angle=150, offset_x=0, offset_y=0, offset_z=0):
    """Sample N light directions on a circle of constant elevation.

    The polar angle is fixed at -angle degrees; the azimuth sweeps one full
    turn in N equal steps. The optional offsets shift each direction vector.

    Returns:
        (light_dirs, thetas, phis): float32 tensors of shapes (N, 3), (N,), (N,).
    """
    theta = -angle / 180 * np.pi
    thetas = [theta] * N
    phis = [2 * np.pi * (k / N) for k in range(N)]
    light_dirs = [
        np.array([
            np.sin(t) * np.cos(p) + offset_x,  # x
            np.cos(t) + offset_y,              # y (up axis)
            np.sin(t) * np.sin(p) + offset_z,  # z
        ])
        for t, p in zip(thetas, phis)
    ]
    dirs_t = torch.from_numpy(np.stack(light_dirs, axis=0))
    thetas_t = torch.from_numpy(np.stack(thetas, axis=0))
    phis_t = torch.from_numpy(np.stack(phis, axis=0))
    return dirs_t.float(), thetas_t.float(), phis_t.float()


def homogeneous(points):
    """Append a homogeneous coordinate of 1 along the last dimension."""
    w = torch.ones_like(points[..., :1])
    return torch.cat((points, w), dim=-1)


def projection_ndc(points, view):
    """Project points through a view into pixel coordinates and depths.

    Args:
        points: (..., 3) world-space points.
        view: object exposing world_view_transform and projection_matrix
            (both 4x4, applied on the right), image_width and image_height.

    Returns:
        (means2D, depths, in_mask):
            means2D: (..., 2) pixel coordinates.
            depths: (...,) camera-space depth per point.
            in_mask: (...,) bool mask of points past the near cutoff (z >= 0.2).
    """
    viewmatrix = view.world_view_transform
    projmatrix = view.projection_matrix

    points_o = homogeneous(points) # object space
    points_h = points_o @ viewmatrix @ projmatrix # screen space # RHS
    # Epsilon guards against division by a zero w-component.
    p_w = 1.0 / (points_h[..., -1:] + 0.000001)
    p_proj = points_h * p_w
    p_view = points_o @ viewmatrix
    in_mask = p_view[..., 2] >= 0.2

    mean_ndc = p_proj

    # NDC [-1, 1] -> pixel coordinates.
    mean_coord_x = ((mean_ndc[..., 0] + 1) * view.image_width - 1.0) * 0.5
    mean_coord_y = ((mean_ndc[..., 1] + 1) * view.image_height - 1.0) * 0.5
    means2D = torch.stack([mean_coord_x, mean_coord_y], dim=-1)
    # Use ellipsis indexing so batched point sets work too, consistent with
    # in_mask above (the previous p_view[:, 2] assumed a 2-D input).
    depths = p_view[..., 2]
    return means2D, depths, in_mask


def cartesian_to_spherical(point: torch.Tensor, center: torch.Tensor):
    """Return the spherical angles (theta, phi) of *point* about *center*.

    theta is the polar angle from the +z axis; phi is the azimuth in
    [0, 2*pi), measured in the xy-plane from the +x axis.
    """
    offset = point - center
    radius = torch.linalg.norm(offset)
    theta = torch.acos(offset[2] / radius)
    phi = torch.atan2(offset[1], offset[0]) % (2 * torch.pi)
    return theta, phi

def spherical_to_cartesian(theta: torch.Tensor, phi: torch.Tensor, radius: float, center: torch.Tensor):
    """Map spherical coordinates (theta, phi, radius) to a Cartesian point about *center*."""
    s = torch.sin(theta)
    offset = torch.stack([
        radius * s * torch.cos(phi),
        radius * s * torch.sin(phi),
        radius * torch.cos(theta),
    ])
    return offset + center