#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
import math
import numpy as np
from typing import NamedTuple

class BasicPointCloud(NamedTuple):
    # Immutable container for a raw point cloud. Fields appear to be
    # per-point positions, RGB colors and normals — presumably (N, 3)
    # arrays; TODO confirm shapes against callers.
    # Note: annotations fixed from `np.array` (a factory function,
    # not a type) to the actual array type `np.ndarray`.
    points: np.ndarray
    colors: np.ndarray
    normals: np.ndarray

def geom_transform_points(points, transf_matrix):
    """Transform (P, 3) points by a 4x4 homogeneous matrix.

    Points are lifted to homogeneous coordinates, multiplied on the
    right by ``transf_matrix`` (batched via ``unsqueeze``), and then
    de-homogenized with a small epsilon to avoid division by zero.
    """
    num_points = points.shape[0]
    ones_col = torch.ones(num_points, 1, dtype=points.dtype, device=points.device)
    homog = torch.cat([points, ones_col], dim=1)

    transformed = torch.matmul(homog, transf_matrix.unsqueeze(0))

    # epsilon guards the perspective divide against w == 0
    w = transformed[..., 3:] + 0.0000001
    return (transformed[..., :3] / w).squeeze(dim=0)

def getWorld2View(R, t):
    """Assemble a 4x4 world-to-view matrix from rotation R and translation t.

    The upper-left 3x3 block stores R transposed; the last column holds t.
    Returns a float32 array.
    """
    world2view = np.eye(4)
    world2view[:3, :3] = R.T
    world2view[:3, 3] = t
    return np.float32(world2view)

def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
    """World-to-view matrix with an extra re-centering of the camera.

    Builds the same matrix as ``getWorld2View``, then shifts the camera
    center by ``translate`` and scales it by ``scale`` (applied in
    camera-to-world space) before inverting back. Returns float32.
    """
    world2cam = np.eye(4)
    world2cam[:3, :3] = R.T
    world2cam[:3, 3] = t

    # adjust the camera center in camera-to-world space
    cam2world = np.linalg.inv(world2cam)
    cam2world[:3, 3] = (cam2world[:3, 3] + translate) * scale

    return np.float32(np.linalg.inv(cam2world))

def getProjectionMatrix(znear, zfar, fovX, fovY):
    """Build a 4x4 perspective projection matrix from FoV and clip planes.

    ``fovX``/``fovY`` are full field-of-view angles in radians. The frustum
    is symmetric, so the off-center terms P[0,2] / P[1,2] evaluate to zero;
    they are kept for clarity. Depth maps to [0, 1] between znear and zfar,
    with +z forward (row 3 copies +z into w).
    """
    tan_half_x = math.tan(fovX * 0.5)
    tan_half_y = math.tan(fovY * 0.5)

    right = tan_half_x * znear
    top = tan_half_y * znear
    left = -right
    bottom = -top

    P = torch.zeros(4, 4)
    P[0, 0] = 2.0 * znear / (right - left)
    P[1, 1] = 2.0 * znear / (top - bottom)
    P[0, 2] = (right + left) / (right - left)
    P[1, 2] = (top + bottom) / (top - bottom)
    P[2, 2] = zfar / (zfar - znear)
    P[2, 3] = -(zfar * znear) / (zfar - znear)
    P[3, 2] = 1.0
    return P

def fov2focal(fov, pixels):
    """Convert a full field-of-view angle (radians) to a focal length in pixels."""
    half_tangent = math.tan(fov * 0.5)
    return pixels / (2.0 * half_tangent)

def focal2fov(focal, pixels):
    """Convert a focal length in pixels to the full field-of-view angle (radians)."""
    return 2.0 * math.atan(pixels / (2.0 * focal))

def compute_weights(d, mode="softmax", alpha=1.0, sigma=1.0, eps=1e-6):
    """Turn a 1-D tensor of distances into normalized weights (smaller d => larger w).

    Args:
        d: 1-D tensor of non-negative distances.
        mode: "inverse" (inverse-distance weighting), "gaussian"
            (RBF kernel with bandwidth ``sigma``) or "softmax"
            (softmax over negated distances).
        alpha: exponent for the inverse-distance branch. Fix: this
            parameter was previously accepted but never used; at the
            default ``alpha=1.0`` behavior is unchanged.
        sigma: Gaussian bandwidth (used only by "gaussian").
        eps: numerical guard for divisions.

    Returns:
        Weights summing (approximately) to 1.

    Raises:
        ValueError: if ``mode`` is not one of the three supported modes.
    """
    if mode == "inverse":
        # classic inverse-distance weighting: w ~ 1 / d**alpha
        w = 1.0 / (d + eps) ** alpha
    elif mode == "gaussian":
        w = torch.exp(-d**2 / (2 * sigma**2))
    elif mode == "softmax":
        # shift by the max for numerical stability (softmax is
        # shift-invariant); already normalized, so return directly
        dd = -(d - d.max())
        return torch.softmax(dd, dim=0)
    else:
        raise ValueError("Unknown mode")
    return w / (w.sum() + eps)

def compute_camera_distance_by_RT(camera, Rs, Ps, lam=1.0, mode='softmax', alpha=1.0, sigma=1.0, eps=1e-6):
    """Weight candidate cameras by pose distance to a reference camera.

    Args:
        camera: reference camera exposing numpy attributes ``R`` (3x3)
            and ``T`` (3,).
        Rs: (N, 3, 3) CUDA tensor of candidate rotations.
        Ps: (N, 3) CUDA tensor of candidate positions.
        lam: weight of the rotational term in the combined distance.
        mode, alpha, sigma, eps: forwarded to ``compute_weights``.

    Returns:
        (N,) float32 tensor of normalized weights (closer pose => larger).

    NOTE(review): moves the reference pose to CUDA unconditionally —
    requires a GPU and CUDA tensors for Rs/Ps.
    """
    ref_R = torch.from_numpy(camera.R).cuda()
    ref_P = torch.from_numpy(camera.T).cuda()

    # Translation distance, expressed in the reference camera frame
    # (the rotation does not change the norm, but matches the original).
    delta = Ps - ref_P.unsqueeze(0)                                   # (N, 3)
    trans_dist = torch.norm(torch.matmul(ref_R.T, delta.T).T, dim=1)  # (N,)

    # Rotation distance: geodesic angle of the relative rotation,
    # recovered from the trace of R_ref^T @ R_i.
    rel = torch.matmul(ref_R.T, Rs)                                   # (N, 3, 3)
    cos_angle = (torch.einsum('nii->n', rel) - 1) / 2
    # clamp to [-1, 1] to prevent numerical overflow in acos
    angle = torch.acos(torch.clamp(cos_angle, -1.0, 1.0))             # (N,)

    combined = torch.sqrt(trans_dist**2 + lam * angle**2)
    return compute_weights(combined, mode=mode, alpha=alpha, sigma=sigma, eps=eps).float()

def homogeneous(points):
    """Append a trailing 1 to the last dim so points accept 4x4 transforms."""
    ones = torch.ones_like(points[..., :1])
    return torch.cat((points, ones), dim=-1)

def projection_ndc(points, view):
    """Project world-space points through a view into 2D image coordinates.

    Args:
        points: (N, 3) world-space positions.
        view: camera-like object exposing ``world_view_transform`` and
            ``projection_matrix`` (both 4x4, applied on the right) plus
            ``image_width`` / ``image_height``.

    Returns:
        (means2D, depths, in_mask): (N, 2) image-plane coordinates,
        (N,) view-space depths, and a boolean mask of points in front
        of the z >= 0.2 near threshold.
    """
    view_mat = view.world_view_transform
    proj_mat = view.projection_matrix

    # lift to homogeneous coordinates (inlined `homogeneous` helper)
    points_h = torch.cat([points, torch.ones_like(points[..., :1])], dim=-1)

    # right-handed convention: row vectors times matrices
    clip = points_h @ view_mat @ proj_mat
    inv_w = 1.0 / (clip[..., -1:] + 0.000001)  # epsilon guards w == 0
    ndc = clip * inv_w

    cam_space = points_h @ view_mat
    in_mask = cam_space[..., 2] >= 0.2

    # map NDC [-1, 1] to pixel-center coordinates
    px = ((ndc[..., 0] + 1) * view.image_width - 1.0) * 0.5
    py = ((ndc[..., 1] + 1) * view.image_height - 1.0) * 0.5
    means2D = torch.stack([px, py], dim=-1)
    return means2D, cam_space[:, 2], in_mask

def encode(tensor, scale=1e4):
    """Pack quantized (N, 3) points into single integer keys.

    Coordinates are scaled by ``scale`` and truncated to integers, then
    packed base 10**4 into one integer per point. Assumes each quantized
    coordinate fits in 4 decimal digits — collisions are possible
    otherwise (TODO confirm against caller data ranges).
    """
    quantized = (tensor * scale).long()
    return quantized[:, 0] * 10**8 + quantized[:, 1] * 10**4 + quantized[:, 2]

def inter_count(points1, points2):
    """Count quantized 3D points shared between two point sets.

    Points are discretized to integer keys (same scheme as the
    module-level ``encode`` helper, inlined here), so "shared" means
    equal up to the 1e-4 quantization step.
    """
    def _keys(pts, scale=1e4):
        # inlined `encode`: scale, truncate, pack base 10**4
        q = (pts * scale).long()
        return q[:, 0] * 10**8 + q[:, 1] * 10**4 + q[:, 2]

    shared = set(_keys(points1).tolist()) & set(_keys(points2).tolist())
    return len(shared)

def compute_camera_distance_by_unproject(points, cur_camera, cameras):
    """Score each camera by how much of its visible point set overlaps cur_camera's.

    For every camera, ``points`` are projected and kept when they land
    inside the image and pass the depth mask; the score for a camera is
    the fraction of its visible points also visible from ``cur_camera``
    (via quantized-point intersection).

    Args:
        points: (N, 3) tensor of world-space points.
        cur_camera: reference camera (projection_ndc-compatible).
        cameras: iterable of candidate cameras.

    Returns:
        1-D float tensor of per-camera overlap ratios on points.device.
    """
    def _visible(camera):
        # Project and keep points inside the image bounds and depth mask.
        uv, _, in_front = projection_ndc(points, camera)
        H, W = camera.image_height, camera.image_width
        # Fix: uv[:, 0] is the x/width coordinate and uv[:, 1] the
        # y/height coordinate (means2D = stack([x, y]) in projection_ndc);
        # the original compared x against H and y against W, which is
        # wrong for non-square images.
        inside = (uv[:, 0] >= 0) & (uv[:, 0] < W) & (uv[:, 1] >= 0) & (uv[:, 1] < H)
        return points[torch.logical_and(in_front, inside)]

    ref_points = _visible(cur_camera)

    dists = []
    for cam in cameras:
        vis = _visible(cam)
        # Guard against an empty visible set (original divided by zero).
        if vis.shape[0] > 0:
            dists.append(inter_count(ref_points, vis) / vis.shape[0])
        else:
            dists.append(0.0)
    return torch.from_numpy(np.asarray(dists)).to(points.device)

def get_plucker_coords(camera):
    """Placeholder for computing Plücker ray coordinates of ``camera``.

    Not implemented yet; currently returns None.
    """
    return None