
from steps_params import args
from arguments import ModelParams, PipelineParams, get_combined_args
import torch
from gaussian_renderer import GaussianModel
from scene import Scene
import os
from os import makedirs
import torchvision
from argparse import ArgumentParser
from gaussian_renderer import render
from matplotlib import pyplot as plt
import math
from utils.sh_utils import eval_sh
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from utils.loss_utils import l1_loss, ssim

# Register the model / pipeline option groups on a fresh parser.  The actual
# argument values come from `steps_params.args`, not from parsing sys.argv.
parser = ArgumentParser(description="Testing script parameters")
model = ModelParams(parser, sentinel=True)
pipeline = PipelineParams(parser)

# Resolve each parameter group against the externally supplied `args`.
dataset = model.extract(args)
pipeline = pipeline.extract(args)
iteration = args.iterations

# Build the Gaussian model and load the scene for the requested iteration
# (presumably the checkpoint saved at `iteration` — confirm in Scene).
gaussians = GaussianModel(dataset.sh_degree)
scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False, lazy_load=True)

# Raw (optimizable) parameter tensors of the Gaussian model, gathered in one
# place.  NOTE(review): this list is not referenced anywhere in this file.
params_list = [
    gaussians._xyz,            # 3D Gaussian kernel centers
    gaussians._features_dc,    # color features — DC term, per the attribute name
    gaussians._features_rest,  # color features — higher-order terms, per the name
    gaussians._opacity,        # opacity parameters
    gaussians._scaling,        # scale parameters
    gaussians._rotation,       # rotation parameters
]

# Constant background color: white for white-background datasets, else black.
white, black = [1, 1, 1], [0, 0, 0]
bg_color = white if dataset.white_background else black
background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")

# Cameras to render, plus rendering knobs.
views = scene.getTrainCameras()
override_color = None    # NOTE(review): never used below
scaling_modifier = 1.0   # 1.0 -> rasterize with the stored scales unchanged

# Render one training camera and compute the 3DGS training loss against its
# ground-truth image.  NOTE(review): the trailing `break` means only the FIRST
# camera is actually processed, and `loss` is neither printed nor backpropagated.
for idx, view in enumerate(views):
    # `view()` is called here — presumably each entry is a lazy loader returning
    # the camera, since the Scene was built with lazy_load=True; confirm in Scene.
    # NOTE(review): this rebinds the module-level `bg_color` list to the tensor.
    viewpoint_camera, pipe, bg_color = view(), pipeline, background
    # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
    means2D = torch.zeros_like(gaussians.get_xyz, dtype=gaussians.get_xyz.dtype, requires_grad=True, device="cuda") + 0
    means2D.retain_grad()

    # Set up rasterization configuration: half-angle tangents of the camera FoV.
    tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
    tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)

    raster_settings = GaussianRasterizationSettings(
        image_height=int(viewpoint_camera.image_height),
        image_width=int(viewpoint_camera.image_width),
        tanfovx=tanfovx,
        tanfovy=tanfovy,
        bg=bg_color,
        scale_modifier=scaling_modifier,
        viewmatrix=viewpoint_camera.world_view_transform, # W2C: world-to-camera transform
        projmatrix=viewpoint_camera.full_proj_transform, # full projection matrix
        sh_degree=gaussians.active_sh_degree, # active spherical-harmonics degree
        campos=viewpoint_camera.camera_center, # camera center position
        prefiltered=False,
        debug=pipe.debug
    )

    rasterizer = GaussianRasterizer(raster_settings=raster_settings)

    # means3D = gaussians.get_xyz
    # opacity = gaussians.get_opacity

    # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
    # scaling / rotation by the rasterizer.
    scales = None
    rotations = None
    cov3D_precomp = None

    # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
    # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
    shs = None
    colors_precomp = None

    # Rasterize visible Gaussians to image, obtain their radii (on screen). 
    rendered_image, radii = rasterizer(
        means3D =  gaussians.get_xyz, # Gaussian center points
        means2D = means2D, # not used in the forward pass; collects screen-space gradients in backward
        shs = gaussians.get_features, # spherical-harmonics coefficients
        colors_precomp = colors_precomp,
        opacities = gaussians.get_opacity, # opacity
        scales = gaussians.get_scaling, # scale factors
        rotations = gaussians.get_rotation, # rotation quaternions
        cov3D_precomp = cov3D_precomp)
    # NOTE(review): computed but never used below — this GPU->CPU copy is wasted work.
    rendered_image_np = rendered_image.detach().cpu().numpy().transpose(1,2,0)
    # Ground-truth image, restricted to the first 3 (RGB) channels.
    gt = viewpoint_camera.original_image[0:3, :, :]
    # Standard 3DGS training loss: (1 - lambda) * L1 + lambda * (1 - SSIM).
    loss_l1 = l1_loss(rendered_image, gt)
    loss_ssim = ssim(rendered_image, gt)
    loss = (1.0 - args.lambda_dssim) * loss_l1 + args.lambda_dssim * (1.0 - loss_ssim)
    break