import torch
from torch.nn import functional as F
from diff_surfel_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from scipy.spatial.transform import Rotation
from typing import Tuple, Literal

# %% Functions

# projection matrix
def projection_from_intrinsics(
                K: torch.Tensor,
                image_size: Tuple[int, int],
                near: float = 0.1,
                far: float = 10,
                style: Literal["opengl", "directx"] = "opengl") -> torch.Tensor:
    """
    Build batched projection matrices from camera intrinsics.

    Transforms points from camera space (x: right, y: up, z: out) to clip
    space (x: right, y: down, z: in).

    Args:
        K: Intrinsics, either a (N, 3, 3) matrix
            K = [[
                        [fx, 0, cx],
                        [0, fy, cy],
                        [0,  0,  1],
                ]
            ]
           or a (N, 4) tensor of (fx, fy, cx, cy).
        image_size: (height, width)
        near: near clip plane distance.
        far: far clip plane distance.
        style: "opengl" or "directx" depth convention.
    Returns:
        (N, 4, 4) projection matrices (same dtype/device as K).

        OpenGL style, NDC z ranges from -1 to 1:
        proj = [[
                [2*fx/w, 0.0,     (w - 2*cx)/w,              0.0                    ],
                [0.0,    2*fy/h,  (h - 2*cy)/h,              0.0                    ],
                [0.0,    0.0,     -(far+near) / (far-near), -2*far*near / (far-near)],
                [0.0,    0.0,     -1.0,                      0.0                    ]
            ]
        ]
        DirectX style, NDC z ranges from 0 to 1:
        proj = [[
                [2*fx/w, 0.0,     (w - 2*cx)/w,     0.0                   ],
                [0.0,    2*fy/h,  (h - 2*cy)/h,     0.0                   ],
                [0.0,    0.0,     far / (far-near), -far*near / (far-near)],
                [0.0,    0.0,     1.0,              0.0                   ]
            ]
        ]
    Raises:
        ValueError: if K has an unexpected shape or style is unknown.
    """
    B = K.shape[0]
    h, w = image_size

    if K.shape[-2:] == (3, 3):
        fx = K[..., 0, 0]
        fy = K[..., 1, 1]
        cx = K[..., 0, 2]
        cy = K[..., 1, 2]
    elif K.shape[-1] == 4:
        # unbind yields (N,) tensors; split(1, dim=-1) would give (N, 1)
        # tensors that cannot be assigned into the (N,) slices below.
        fx, fy, cx, cy = K.unbind(dim=-1)
    else:
        raise ValueError(f"Expected K to be (N, 3, 3) or (N, 4) but got: {K.shape}")

    proj = torch.zeros([B, 4, 4], dtype=K.dtype, device=K.device)
    # pinhole part is identical for both conventions
    proj[:, 0, 0] = fx * 2 / w
    proj[:, 1, 1] = fy * 2 / h
    proj[:, 0, 2] = (w - 2 * cx) / w
    proj[:, 1, 2] = (h - 2 * cy) / h
    if style == "opengl":
        proj[:, 2, 2] = -(far + near) / (far - near)
        proj[:, 2, 3] = -2 * far * near / (far - near)
        proj[:, 3, 2] = -1
    elif style == "directx":
        proj[:, 2, 2] = far / (far - near)
        proj[:, 2, 3] = -far * near / (far - near)
        proj[:, 3, 2] = 1
    else:
        # previously an unknown style silently returned an all-zero matrix
        raise ValueError(f"Unknown projection style: {style!r}")
    return proj
  
def get_rasterizer(viewmat, projmat, image_size, sh_degree=0, bg_image=None, device=torch.device("cuda:0")):
    """Build a surfel Gaussian rasterizer configured for one camera.

    Args:
        viewmat: (4, 4) world-to-view transform, stored transposed
            (row-vector convention, judging by the `viewmat.mm(projmat)`
            composition below) — TODO confirm against callers.
        projmat: (4, 4) projection matrix, also transposed, e.g. the
            transpose of `projection_from_intrinsics` output — TODO confirm.
        image_size: (height, width) in pixels.
        sh_degree: active spherical-harmonics degree passed to the rasterizer.
        bg_image: background tensor; defaults to zeros of shape (1, 6).
        device: device used only for the default background tensor.

    Returns:
        A `GaussianRasterizer` from `diff_surfel_rasterization`.
    """
    height, width = image_size
    if bg_image is None:
      bg_image = torch.zeros(1, 6).to(device)

    # camera position
    # NOTE(review): for a general rotation the camera center of a transposed
    # world-to-view matrix is viewmat.inverse()[3, :3]; -viewmat[3, :3] only
    # equals that when the rotation block is identity — confirm intended.
    campos = -viewmat[3, :3]

    # full projection (view * proj in row-vector convention)
    full_projmat = viewmat.mm(projmat)

    rasterizer = GaussianRasterizer(raster_settings=GaussianRasterizationSettings(
                image_height=int(height),
                image_width=int(width),
                tanfovx = projmat[0, 0], # matters for backward; also used for screen_points gradients
                                         # NOTE(review): projmat[0,0] = 2*fx/w = 1/tan(fovx/2);
                                         # verify the rasterizer expects this rather than tan(fovx/2).
                tanfovy = projmat[1, 1], # matters for backward; also used for screen_points gradients
                bg = bg_image,
                scale_modifier=1.0,
                viewmatrix=viewmat,
                projmatrix=full_projmat, # full projection( view * proj )
                sh_degree=sh_degree,
                campos=campos, # unused in forward; possibly used in backward
                prefiltered=False,
                debug=False
            ))
    return rasterizer

def depths_to_points(world_view_transform, full_proj_transform, image_size, depthmap):
    """Back-project a depth map into world-space 3D points.

    Args:
        world_view_transform: (4, 4) world-to-view transform, stored
            transposed (row-vector convention).
        full_proj_transform: (4, 4) combined view-projection transform,
            same transposed convention.
        image_size: (H, W) in pixels.
        depthmap: depth per pixel; reshaped to (H*W, 1), so any layout with
            H*W elements in row-major pixel order works (e.g. (1, H, W)).

    Returns:
        (H*W, 3) world-space points, one per pixel in row-major order.
    """
    # Fix: everything lives on depthmap's device. Previously ndc2pix and the
    # pixel grids were hard-coded to CUDA while c2w followed depthmap.device,
    # crashing on CPU tensors and mismatching on non-default GPUs.
    device = depthmap.device
    c2w = (world_view_transform.T).inverse().to(device)
    full_proj_transform = full_proj_transform.to(device)
    H, W = image_size
    # NDC -> pixel mapping, transposed to match the row-vector convention.
    ndc2pix = torch.tensor([
        [W / 2, 0, 0, (W) / 2],
        [0, H / 2, 0, (H) / 2],
        [0, 0, 0, 1]], dtype=torch.float32, device=device).T
    # Recover the (transposed) intrinsics from projection * ndc2pix.
    projection_matrix = c2w.T @ full_proj_transform
    intrins = (projection_matrix @ ndc2pix)[:3, :3].T

    grid_x, grid_y = torch.meshgrid(
        torch.arange(W, device=device).float(),
        torch.arange(H, device=device).float(),
        indexing='xy')
    points = torch.stack([grid_x, grid_y, torch.ones_like(grid_x)], dim=-1).reshape(-1, 3)
    # Per-pixel ray directions in world space; rays_o is the camera center.
    rays_d = points @ intrins.inverse().T @ c2w[:3, :3].T
    rays_o = c2w[:3, 3]
    points = depthmap.reshape(-1, 1) * rays_d + rays_o
    return points

def depth_to_normal(world_view_transform, full_proj_transform, image_size, depth):
    """Estimate a world-space normal map from a depth map.

    Back-projects the depth map to 3D points, then takes the cross product
    of central finite differences along the two image axes. Border pixels
    (where a central difference is unavailable) keep zero normals.

    Args:
        world_view_transform: (4, 4) transposed world-to-view transform.
        full_proj_transform: (4, 4) transposed full projection transform.
        image_size: (H, W).
        depth: depth map; its trailing dims give the (H, W) point-grid shape.

    Returns:
        (H, W, 3) tensor of unit normals (zeros on the border).
    """
    pts = depths_to_points(
        world_view_transform, full_proj_transform, image_size, depth
    ).reshape(*depth.shape[1:], 3)
    normals = torch.zeros_like(pts)
    ddx = pts[2:, 1:-1] - pts[:-2, 1:-1]
    ddy = pts[1:-1, 2:] - pts[1:-1, :-2]
    normals[1:-1, 1:-1, :] = torch.nn.functional.normalize(
        torch.cross(ddx, ddy, dim=-1), dim=-1)
    return normals

def get_from_allmap(allmap, world_view_transform, full_proj_transform, image_size):
    """Unpack the rasterizer's stacked auxiliary output into named maps.

    Channel layout of `allmap` (as read below): 0 = alpha-weighted expected
    depth, 1 = accumulated alpha, 2-4 = view-space normal, 5 = median depth,
    6 = distortion.

    Returns:
        Dict with 'rend_alpha', 'rend_normal', 'rend_dist', 'surf_depth'
        and 'surf_normal' tensors, all in CHW layout.
    """
    alpha = allmap[1:2]

    # Normals are rendered in view space; rotate them into world space and
    # renormalize.
    normal = allmap[2:5]
    view_rot = (world_view_transform[:3,:3].T).to(normal.device)
    normal = F.normalize((normal.permute(1, 2, 0) @ view_rot), dim=-1).permute(2, 0, 1)

    # Median depth, with NaN/inf zeroed out.
    median_depth = torch.nan_to_num(allmap[5:6], 0, 0)

    # Expected depth: undo the alpha weighting, then zero NaN/inf
    # (division by zero alpha).
    expected_depth = torch.nan_to_num(allmap[0:1] / alpha, 0, 0)

    dist = allmap[6:7]

    # Pseudo surface depth: blend of median and expected depth.
    # depth_ratio = 1 -> median (bounded scenes);
    # depth_ratio = 0 -> expected (unbounded scenes, reduces disk aliasing).
    depth_ratio = 0.0
    surf_depth = expected_depth * (1 - depth_ratio) + depth_ratio * median_depth

    # Pseudo surface normal from the depth points, for regularization.
    # Multiply by accumulated alpha since the rendered normal is unnormalized.
    surf_normal = depth_to_normal(
        world_view_transform, full_proj_transform, image_size, surf_depth
    ).permute(2, 0, 1)
    surf_normal = surf_normal * alpha.detach()

    return {'rend_alpha': alpha,
            'rend_normal': normal,
            'rend_dist': dist,
            'surf_depth': surf_depth,
            'surf_normal': surf_normal,}