import torch
from tqdm import tqdm
import torch.nn as nn
from utils.sh_utils import eval_sh
from utils.render_utils import projection_ndc
from point_splatting.point_model import PointModel
# from diff_point_rasterization import PointRasterizationSettings, PointRasterizer


# CUDA renderer (disabled: depends on the commented-out diff_point_rasterization
# import above; kept as a reference implementation for the PyTorch path below)
# def render(viewpoint_camera, points:PointModel, pipe, bg_color:torch.Tensor, override_color=None):

#     # Set up rasterization configuration
#     tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
#     tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

#     raster_settings = PointRasterizationSettings(
#         image_height=int(viewpoint_camera.image_height),
#         image_width=int(viewpoint_camera.image_width),
#         tanfovx=tanfovx,
#         tanfovy=tanfovy,
#         bg=bg_color,
#         viewmatrix=viewpoint_camera.world_view_transform,
#         projmatrix=viewpoint_camera.full_proj_transform,
#         sh_degree=3,
#         campos=viewpoint_camera.camera_center,
#         prefiltered=False,
#         debug=pipe.debug)

#     rasterizer = PointRasterizer(raster_settings=raster_settings)

#     point3D = points.get_xyz
#     opacity = points.get_opacity

#     shs = None
#     colors_precomp = None
#     if override_color is None:
#         if pipe.convert_SHs_python:
#             shs_view = points.get_features.transpose(1, 2).view(-1, 3, (points.max_sh_degree+1)**2)
#             dir_pp = (points.get_xyz - viewpoint_camera.camera_center.repeat(points.get_features.shape[0], 1))
#             dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
#             sh2rgb = eval_sh(points.active_sh_degree, shs_view, dir_pp_normalized)
#             colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) # (N, 3)
#         else:
#             shs = points.get_features
#     else:
#         colors_precomp = override_color

#     rendered_image = rasterizer(point3D=point3D, shs=shs, colors_precomp=colors_precomp, opacities=opacity)
    
#     return {"render": rendered_image}

        


# Pure-PyTorch renderer (slow reference path: composites pixel by pixel)
class PointRenderer(nn.Module):
    """Pure-PyTorch point-splatting renderer.

    Projects a PointModel's points into a camera view and alpha-composites
    them per pixel, front to back, producing color and depth images.
    Mirrors the commented-out CUDA rasterizer path above.
    """

    def __init__(self, args):
        """
        Args:
            args: config namespace; reads ``sh_degree`` (max SH degree) and
                ``white_background`` (compositing background color).
        """
        super(PointRenderer, self).__init__()
        self.args = args
        self.max_sh_degree = args.sh_degree
        # Fix: `bg_color` was previously computed and discarded while
        # `white_bkgd` was hard-coded to False, so `args.white_background`
        # had no effect. Honor the flag.
        self.white_bkgd = bool(args.white_background)

    def build_color_from_SHs(self, view, pts, shs, active_sh_degree):
        """Evaluate per-point RGB from SH coefficients along the view rays.

        Args:
            view: camera exposing ``camera_center`` (3,) tensor.
            pts: (N, 3) world-space point positions.
            shs: (N, K, 3) SH coefficients per point.
            active_sh_degree: SH degree currently in use.

        Returns:
            (N, 3) non-negative RGB colors.
        """
        rays_o = view.camera_center
        rays_d = pts - rays_o
        # eval_sh expects unit view directions — the CUDA reference path
        # above normalizes dir_pp the same way before calling eval_sh.
        rays_d = rays_d / rays_d.norm(dim=1, keepdim=True)
        color = eval_sh(active_sh_degree, shs.permute(0, 2, 1), rays_d)
        color = (color + 0.5).clip(min=0.0)
        return color

    def render(self, view, point2D, color, opacity, depths):
        """Alpha-composite projected points into an image, pixel by pixel.

        Args:
            view: camera exposing ``image_width`` / ``image_height``.
            point2D: (N, 2) pixel coordinates (x, y) of projected points.
            color: (N, 3) per-point RGB.
            opacity: (N, 1) per-point opacity.
            depths: (N,) per-point view-space depth.

        Returns:
            (render_color, render_depth): (3, H, W) and (1, H, W) tensors.
        """
        H, W = view.image_height, view.image_width
        render_color = torch.ones(3, H, W).to('cuda')
        render_depth = torch.zeros(1, H, W).to('cuda')

        background = torch.ones(3).to('cuda') if self.white_bkgd else torch.zeros(3).to('cuda')

        for h in tqdm(range(H)):
            for w in range(W):
                # point2D is (x, y): x indexes columns (w), y indexes rows (h).
                # Fix: the original compared x against h and y against w,
                # transposing the image lookup.
                in_pixel_mask = (point2D[:, 0] >= w) & (point2D[:, 0] < w+1) & \
                                (point2D[:, 1] >= h) & (point2D[:, 1] < h+1)

                if not in_pixel_mask.any():
                    render_color[:, h, w] = background
                    continue

                # Sort the points in this pixel front to back.
                sorted_depths, index = torch.sort(depths[in_pixel_mask])
                sorted_opacity = opacity[in_pixel_mask][index].clip(max=0.99)  # (K, 1)
                sorted_color = color[in_pixel_mask][index]                     # (K, 3)

                # Transmittance before each point: T_i = prod_{j<i} (1 - a_j).
                # Fix: accumulate along the point axis (dim 0) — the original
                # cumprod over dim 1 ran across the singleton opacity axis,
                # leaving T == 1 everywhere (no occlusion).
                T = torch.cat([torch.ones_like(sorted_opacity[:1]),
                               1 - sorted_opacity[:-1]], dim=0).cumprod(dim=0)  # (K, 1)
                weights = T * sorted_opacity                                    # (K, 1)
                acc_alpha = weights.sum(dim=0)                                  # (1,)

                pix_color = (weights * sorted_color).sum(dim=0) + (1 - acc_alpha) * background
                # Fix: weighted depth must reduce to a scalar; the original
                # broadcast (K,1) against (1,K,1) and produced a (K,1) tensor
                # that cannot be written into render_depth[:, h, w] for K > 1.
                pix_depth = (weights[:, 0] * sorted_depths).sum()
                render_color[:, h, w] = pix_color
                render_depth[:, h, w] = pix_depth

        return render_color, render_depth

    def forward(self, view, point_model, active_sh_degree):
        """Render the point model from the given camera view.

        Args:
            view: camera with projection matrices, image size, camera center.
            point_model: PointModel providing xyz, SH features and opacity.
            active_sh_degree: SH degree currently in use.

        Returns:
            dict with 'render_color' (3, H, W) and 'render_depth' (1, H, W).
        """
        xyz = point_model.get_xyz
        mean_ndc, mean_view, in_mask = projection_ndc(xyz,
                                                      viewmatrix=view.world_view_transform,
                                                      projmatrix=view.projection_matrix)
        # Keep only points that survive the frustum/projection mask.
        mean_ndc = mean_ndc[in_mask]
        mean_view = mean_view[in_mask]
        depths = mean_view[:, 2]

        pts = xyz[in_mask]
        shs = point_model.get_features[in_mask]
        opacity = point_model.get_opacity[in_mask]
        color = self.build_color_from_SHs(view, pts, shs, active_sh_degree)

        # NDC [-1, 1] -> pixel coordinates: px = ((ndc + 1) * size - 1) / 2
        # (same convention as ndc2Pix in the CUDA rasterizer).
        mean_coord_x = ((mean_ndc[..., 0] + 1) * view.image_width - 1.0) * 0.5
        mean_coord_y = ((mean_ndc[..., 1] + 1) * view.image_height - 1.0) * 0.5
        point2D = torch.stack([mean_coord_x, mean_coord_y], dim=-1)

        render_color, render_depth = self.render(view, point2D, color, opacity, depths)

        return {'render_color': render_color,
                'render_depth': render_depth}