import torch
import numpy as np
from torch.utils.checkpoint import checkpoint

world_radius = 1.0  # Radius of the world in each dimension
default_device = 'cuda' if torch.cuda.is_available() else 'cpu'

def build_rotation_matrices (rotations : torch.Tensor) -> torch.Tensor:
    '''
    Build rotation matrices from quaternion representations.

    Quaternions are expected in (qw, qx, qy, qz) order and should be unit
    length; no normalization is performed here.

    :param rotations: (n, 4) tensor of quaternions (qw, qx, qy, qz)
    :return: (n, 3, 3) tensor of rotation matrices, same dtype/device as input
    '''
    assert rotations.dim() == 2 and rotations.size(1) == 4, "Rotations must be a (n, 4) tensor."
    qw, qx, qy, qz = rotations.unbind(-1)  # Each is (n,)
    # Standard unit-quaternion-to-matrix conversion, assembled row by row.
    # Building via torch.stack (instead of writing into a float32 zeros
    # buffer) preserves the input dtype (e.g. float64) and avoids in-place
    # index assignment.
    row0 = torch.stack([1 - 2 * (qy**2 + qz**2), 2 * (qx*qy - qw*qz),     2 * (qx*qz + qw*qy)],     dim=-1)
    row1 = torch.stack([2 * (qx*qy + qw*qz),     1 - 2 * (qx**2 + qz**2), 2 * (qy*qz - qw*qx)],     dim=-1)
    row2 = torch.stack([2 * (qx*qz - qw*qy),     2 * (qy*qz + qw*qx),     1 - 2 * (qx**2 + qy**2)], dim=-1)
    return torch.stack([row0, row1, row2], dim=-2)  # (n, 3, 3)

def build_scale_matrices (scales : torch.Tensor) -> torch.Tensor:
    '''
    Build diagonal scale matrices from per-axis scale vectors.

    :param scales: (n, 3) tensor of scales
    :return: (n, 3, 3) tensor of scale matrices with `scales` on the diagonal,
             same dtype/device as the input
    '''
    assert scales.dim() == 2 and scales.size(1) == 3, "Scales must be a (n, 3) tensor."
    # diag_embed preserves dtype/device and supports autograd cleanly,
    # unlike allocating a float32 zeros buffer and writing the diagonal
    # in place (which silently downcast float64 inputs).
    return torch.diag_embed(scales)  # (n, 3, 3)

class VolumePrimitives:
    '''
    Abstract base class for volume primitives.

    Subclasses represent a renderable volume and must implement
    `evaluate_at_positions`; this class only defines the interface.
    '''

    def __init__(self):
        # Deliberately non-instantiable: subclasses define their own state.
        raise NotImplementedError("VolumePrimitives is an abstract class and cannot be instantiated directly.")
    def evaluate_at_positions(self, positions: torch.Tensor) -> torch.Tensor:
        '''
        Evaluate the volume opacity and weighted color at given positions.
        :param positions: (n, 3) tensor of positions
        :return: (n, 4) tensor of evaluated values; channels 0-2 are color, the 4th channel is the opacity
        '''
        raise NotImplementedError("Subclasses must implement evaluate_at_positions method.")

class UniformVolumePrimitives(VolumePrimitives):
    '''
    Represents a volume as a sum of uniform-density ellipsoids (unit spheres
    under a per-primitive scale and rotation transform).

    Parameters are stored in unconstrained "raw" form and mapped to usable
    ranges by the activation properties (`scales`, `rotations`, `opacities`,
    `colors`).
    '''
    def __init__(self, num_spheres=10):
        # Number of ellipsoid primitives in this volume.
        self.num_spheres : int = num_spheres
        # (To world) Scales of the spheres, (n, 3)
        self.scales_raw : torch.Tensor = torch.ones((num_spheres, 3), dtype=torch.float32)
        # (To world) Rotations of the spheres, (n, 4) for quaternion representation.
        # NOTE(review): the all-zeros init cannot be normalized (norm is 0), so
        # the `rotations` property yields NaN until random_initialization is
        # called — TODO confirm callers always initialize first.
        self.rotations_raw : torch.Tensor = torch.zeros((num_spheres, 4), dtype=torch.float32)
        # Opacities of the spheres, (n, 1)
        self.opacities_raw : torch.Tensor = torch.ones((num_spheres, 1), dtype=torch.float32)
        # Centers of the spheres, (n, 3)
        self.centers : torch.Tensor = torch.zeros((num_spheres, 3), dtype=torch.float32)
        # Colors of the spheres, (n, 3)
        self.colors_raw : torch.Tensor = torch.ones((num_spheres, 3), dtype=torch.float32)

    def scales_activation(self, scales_raw: torch.Tensor) -> torch.Tensor:
        '''
        Activation function for scales to ensure they are positive.
        Currently the identity mapping; an exp activation was tried and disabled.
        :param scales_raw: (n, 3) tensor of raw scales
        :return: (n, 3) tensor of activated scales
        '''
        return scales_raw  # identity; previously torch.exp(scales_raw)

    @property
    def scales(self) -> torch.Tensor:
        '''
        Get the (activated) scales of the spheres.
        :return: (n, 3) tensor of scales
        '''
        return self.scales_activation(self.scales_raw)
    
    @property
    def rotations(self) -> torch.Tensor:
        '''
        Get the rotations of the spheres, normalized to unit quaternions.
        :return: (n, 4) tensor of rotations
        '''
        return self.rotations_raw / torch.norm(self.rotations_raw, dim=1, keepdim=True)
    
    @property
    def opacities(self) -> torch.Tensor:
        '''
        Get the opacities of the spheres, squashed to (0, 1) via sigmoid.
        :return: (n, 1) tensor of opacities
        '''
        return torch.sigmoid(self.opacities_raw)
    
    @property
    def colors(self) -> torch.Tensor:
        '''
        Get the colors of the spheres, squashed to (0, 1) via sigmoid.
        :return: (n, 3) tensor of colors
        '''
        return torch.sigmoid(self.colors_raw)

    def random_initialization (self, device = 'cpu'):
        '''
        Randomly initialize the parameters of the spheres.
        :param device: device on which to allocate all parameter tensors
        '''
        # Scales in [0, 0.1 * world_radius) — small ellipsoids relative to the world.
        self.scales_raw = torch.rand((self.num_spheres, 3), device=device) * 0.1 * world_radius
        # Random quaternions; normalization happens lazily in the `rotations` property.
        self.rotations_raw = torch.randn((self.num_spheres, 4), device=device)
        self.opacities_raw = torch.rand((self.num_spheres, 1), device=device)
        # Centers confined to the inner 60% of the [-world_radius, world_radius] cube.
        self.centers = 0.6 * (torch.rand((self.num_spheres, 3), device=device) * world_radius * 2 - world_radius)
        self.colors_raw = torch.rand((self.num_spheres, 3), device=device)

    def evaluate_at_positions(self, positions: torch.Tensor) -> torch.Tensor:
        '''
        Evaluate the volume opacity and weighted color at given positions.

        Each query point is mapped into every ellipsoid's local frame
        (translate, rotate, then divide by scale); points inside the unit
        sphere contribute that primitive's opacity and opacity-weighted color.

        :param positions: (n, 3) tensor of positions
        :return: (n, 4) tensor of evaluated values, the 4th channel is the opacity
        '''
        assert positions.dim() == 2 and positions.size(1) == 3, "Positions must be a (n, 3) tensor."
        n = positions.size(0)
        
        # Expand positions to (n, num_spheres, 3) for vectorized computation
        expanded_positions = positions.unsqueeze(1).expand(-1, self.num_spheres, -1)  # (n, num_spheres, 3)
        expanded_centers = self.centers.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 3)
        
        # Get positions relative to ellipsoid centers
        rel_positions = expanded_positions - expanded_centers  # (n, num_spheres, 3)
        
        # Build rotation matrices for all spheres at once (num_spheres, 3, 3)
        rot_matrices = build_rotation_matrices(self.rotations)  # (num_spheres, 3, 3)
        
        # Flatten so each (point, sphere) pair is one bmm batch element.
        rel_positions_flat = rel_positions.reshape(-1, 3)  # (n*num_spheres, 3)
        
        # Tile rotation matrices so element k of the flat batch uses
        # rot_matrices[k % num_spheres] (repeat over points, not spheres).
        rot_matrices_expanded = rot_matrices.unsqueeze(0).repeat(n, 1, 1, 1).reshape(-1, 3, 3) # (n*num_spheres, 3, 3)
        
        # NOTE(review): row-vector bmm with R^T computes R @ rel, i.e. the
        # local->world rotation, while the surrounding comments describe an
        # *inverse* (world->local) rotation — the transpose direction looks
        # flipped; TODO confirm against the quaternion convention used.
        rotated_positions = torch.bmm(rel_positions_flat.unsqueeze(1), 
                                      rot_matrices_expanded.transpose(1, 2)).squeeze(1)  # (n*num_spheres, 3)
        rotated_positions = rotated_positions.reshape(n, self.num_spheres, 3)  # (n, num_spheres, 3)
        
        # Apply inverse scaling
        expanded_scales = self.scales.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 3)
        scaled_positions = rotated_positions / expanded_scales  # (n, num_spheres, 3)
        
        # Check if points are inside the unit sphere in the transformed space
        inside_mask = torch.sum(scaled_positions**2, dim=2, keepdim=True) <= 1.0  # (n, num_spheres, 1)
        inside_mask = inside_mask.float()  # (n, num_spheres, 1)
        
        # Apply opacity
        expanded_opacities = self.opacities.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 1)
        opacity_contribution = inside_mask * expanded_opacities  # (n, num_spheres, 1)
        
        # Calculate weighted color contributions
        expanded_colors = self.colors.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 3)
        color_contributions = opacity_contribution.expand(-1, -1, 3) * expanded_colors  # (n, num_spheres, 3)
        
        # Sum contributions across all spheres
        weighted_colors = torch.sum(color_contributions, dim=1)  # (n, 3)
        total_opacity = torch.sum(opacity_contribution, dim=1)  # (n, 1)

        # Normalize colors to an opacity-weighted average; the epsilon guards
        # the empty case (zero opacity -> zero color).
        weighted_colors = weighted_colors / torch.clamp(total_opacity + 1e-6, min=1e-6)  # Avoid division by zero
        
        # Clamp total opacity to [0, 1]
        total_opacity = torch.clamp(total_opacity, 0.0, 1.0)
        
        # Combine weighted colors and opacity
        evaluated_values = torch.cat([weighted_colors, total_opacity], dim=1)  # (n, 4)
        
        return evaluated_values
        
def evaluate_epanechnikov(positions: torch.Tensor) -> torch.Tensor:
    '''
    Evaluate the Epanechnikov kernel 0.75 * (1 - ||x||^2) at given positions.
    The kernel is zero outside the unit ball.
    :param positions: (..., 3) tensor of positions
    :return: (..., 1) tensor of evaluated values
    '''
    assert positions.dim() >= 2 and positions.size(-1) == 3, "Positions must be a (..., 3) tensor."
    sq_dist = (positions * positions).sum(dim=-1, keepdim=True)  # (..., 1)
    # Clamping (1 - ||x||^2) at zero realizes both the unit-ball support
    # cutoff and non-negativity in a single step.
    return 0.75 * torch.clamp(1.0 - sq_dist, min=0.0)  # (..., 1)

class EpanechnikovVolumePrimitives(VolumePrimitives):
    '''
    Represents a volume as a sum of ellipsoidal Epanechnikov kernels.

    Unlike UniformVolumePrimitives, parameters here are stored directly in
    their usable ranges (no raw/activation split).
    '''
    def __init__(self, num_spheres=10):
        # Number of kernel primitives in this volume.
        self.num_spheres : int = num_spheres
        # Per-axis scales of the ellipsoids, (n, 3)
        self.scales : torch.Tensor = torch.ones((num_spheres, 3), dtype=torch.float32)
        # Rotations of the ellipsoids, (n, 4) quaternions; random_initialization
        # stores them pre-normalized.
        self.rotations : torch.Tensor = torch.zeros((num_spheres, 4), dtype=torch.float32)
        # Opacities of the ellipsoids, (n, 1)
        self.opacities : torch.Tensor = torch.ones((num_spheres, 1), dtype=torch.float32)
        # Centers of the ellipsoids, (n, 3)
        self.centers : torch.Tensor = torch.zeros((num_spheres, 3), dtype=torch.float32)
        # Colors of the ellipsoids, (n, 3)
        self.colors : torch.Tensor = torch.ones((num_spheres, 3), dtype=torch.float32)

    def random_initialization (self, device = 'cpu'):
        '''
        Randomly initialize the parameters of the spheres.
        :param device: device on which to allocate all parameter tensors
        '''
        # Scales in [0.5, 2.5)
        self.scales = torch.rand((self.num_spheres, 3), device=device) * 2.0 + 0.5
        # Random unit quaternions (normalized immediately, unlike the raw storage above).
        self.rotations = torch.randn((self.num_spheres, 4), device=device)
        self.rotations = self.rotations / torch.norm(self.rotations, dim=1, keepdim=True)
        # Opacities in [0.2, 1.0)
        self.opacities = torch.rand((self.num_spheres, 1), device=device) * 0.8 + 0.2
        # Centers anywhere in the [-world_radius, world_radius] cube.
        self.centers = torch.rand((self.num_spheres, 3), device=device) * world_radius * 2 - world_radius
        self.colors = torch.rand((self.num_spheres, 3), device=device)

    def evaluate_at_positions(self, positions: torch.Tensor) -> torch.Tensor:
        '''
        Evaluate the volume opacity and weighted color at given positions.

        Each query point is mapped into every ellipsoid's local frame and the
        Epanechnikov kernel value there modulates that primitive's opacity.

        :param positions: (n, 3) tensor of positions
        :return: (n, 4) tensor of evaluated values, the 4th channel is the opacity
        '''
        assert positions.dim() == 2 and positions.size(1) == 3, "Positions must be a (n, 3) tensor."
        n = positions.size(0)
        
        # Expand positions to (n, num_spheres, 3) for vectorized computation
        expanded_positions = positions.unsqueeze(1).expand(-1, self.num_spheres, -1)  # (n, num_spheres, 3)
        expanded_centers = self.centers.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 3)
        
        # Get positions relative to ellipsoid centers
        rel_positions = expanded_positions - expanded_centers  # (n, num_spheres, 3)
        
        # Build rotation matrices for all spheres at once (num_spheres, 3, 3)
        rot_matrices = build_rotation_matrices(self.rotations)  # (num_spheres, 3, 3)
        
        # Flatten so each (point, sphere) pair is one bmm batch element.
        rel_positions_flat = rel_positions.reshape(-1, 3)  # (n*num_spheres, 3)
        
        # Tile rotation matrices so element k of the flat batch uses
        # rot_matrices[k % num_spheres] (repeat over points, not spheres).
        rot_matrices_expanded = rot_matrices.unsqueeze(0).repeat(n, 1, 1, 1).reshape(-1, 3, 3) # (n*num_spheres, 3, 3)
        
        # NOTE(review): row-vector bmm with R^T computes R @ rel (local->world)
        # although the intent stated nearby is the *inverse* rotation; the
        # transpose direction looks flipped — TODO confirm convention.
        rotated_positions = torch.bmm(rel_positions_flat.unsqueeze(1), 
                                      rot_matrices_expanded.transpose(1, 2)).squeeze(1)  # (n*num_spheres, 3)
        rotated_positions = rotated_positions.reshape(n, self.num_spheres, 3)  # (n, num_spheres, 3)
        
        # Apply inverse scaling
        expanded_scales = self.scales.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 3)
        scaled_positions = rotated_positions / expanded_scales  # (n, num_spheres, 3)
        
        # Per-primitive kernel weight in the transformed (local) space
        alpha_mask = evaluate_epanechnikov(scaled_positions)  # (n, num_spheres, 1)
        alpha_mask = alpha_mask.float()  # (n, num_spheres, 1)
        
        # Apply opacity
        expanded_opacities = self.opacities.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 1)
        opacity_contribution = alpha_mask * expanded_opacities  # (n, num_spheres, 1)
        
        # Calculate weighted color contributions
        expanded_colors = self.colors.unsqueeze(0).expand(n, -1, -1)  # (n, num_spheres, 3)
        color_contributions = opacity_contribution.expand(-1, -1, 3) * expanded_colors  # (n, num_spheres, 3)
        
        # Sum contributions across all spheres
        weighted_colors = torch.sum(color_contributions, dim=1)  # (n, 3)
        total_opacity = torch.sum(opacity_contribution, dim=1)  # (n, 1)

        # Normalize colors to an opacity-weighted average; the epsilon guards
        # the empty case (zero opacity -> zero color).
        weighted_colors = weighted_colors / torch.clamp(total_opacity + 1e-6, min=1e-6)  # Avoid division by zero
        
        # Clamp total opacity to [0, 1]
        total_opacity = torch.clamp(total_opacity, 0.0, 1.0)
        
        # Combine weighted colors and opacity
        evaluated_values = torch.cat([weighted_colors, total_opacity], dim=1)  # (n, 4)
        
        return evaluated_values


class RayBatch:
    '''
    Container for a batch of rays, stored as parallel origin/direction tensors.
    '''
    def __init__(self, origins: torch.Tensor, directions: torch.Tensor):
        '''
        Initialize the RayBatch with origins and directions.
        :param origins: (n, 3) tensor of ray origins
        :param directions: (n, 3) tensor of ray directions
        '''
        # Validate both inputs before storing anything.
        assert origins.dim() == 2 and origins.size(1) == 3, "Origins must be a (n, 3) tensor."
        assert directions.dim() == 2 and directions.size(1) == 3, "Directions must be a (n, 3) tensor."
        self.num_rays = origins.shape[0]
        self.origins = origins
        self.directions = directions
    

def forward_render_raymarching (rays : RayBatch, primitives : VolumePrimitives) -> torch.Tensor:
    '''
    Forward render a batch of rays through the volume defined by the primitives.

    Uses a fixed-step ray marcher with a non-exponential (linear) scattering
    model: each step removes at most `opacity * step_size` from the ray's
    remaining power and accumulates color weighted by the removed amount.

    :param rays: RayBatch object containing origins and directions
    :param primitives: VolumePrimitives object defining the volume
    :return: (n, 4) tensor of rendered values (color and opacity)
    '''
    assert rays.origins.size(0) == rays.directions.size(0), "Origins and directions must have the same number of rays."

    marching_step_size = 0.1  # Step size for marching along the rays
    num_steps = int(world_radius * 4 / marching_step_size)  # Number of steps to take along each ray

    # Allocate accumulators on the same device as the rays. Using the global
    # default_device here caused a device mismatch whenever the rays lived on
    # a different device (e.g. CPU rays while CUDA is available).
    device = rays.origins.device
    ray_remaining_power = torch.ones((rays.num_rays, 1), device=device)
    ray_t = torch.zeros(rays.num_rays, device=device)  # Start at t=0 for each ray
    ray_colors = torch.zeros((rays.num_rays, 3), device=device)  # Initialize colors to zero

    for _ in range(num_steps):
        # NOTE: all updates are out-of-place so autograd can track each step.
        ray_positions = rays.origins + ray_t.unsqueeze(1) * rays.directions  # (n, 3)
        evaluated_values = primitives.evaluate_at_positions(ray_positions)
        # Power removed this step, capped by what the ray still carries.
        ray_erode = torch.min(evaluated_values[:, 3:4] * marching_step_size,
                              ray_remaining_power)

        ray_remaining_power = ray_remaining_power - ray_erode  # avoid in-place -=
        ray_colors = ray_colors + evaluated_values[:, :3] * ray_erode  # avoid in-place +=

        ray_t = ray_t + marching_step_size  # avoid in-place +=

    return torch.cat([ray_colors, 1 - ray_remaining_power], dim=1)  # (n, 4) tensor of rendered values (color and opacity)

def combine_uniform_distributions (old_means : torch.Tensor, old_scales : torch.Tensor, old_amplitudes : torch.Tensor, old_colors : torch.Tensor, 
                               new_means : torch.Tensor, new_scales : torch.Tensor, new_amplitudes: torch.Tensor, new_colors : torch.Tensor) -> dict:
    '''
    Pairwise combining multiple pairs of 1D uniform distributions into multiple 1D uniform distributions.

    For each pair, the cumulative density y(x) of the two overlapping uniform
    "boxes" is evaluated; x_solved is where y(x) reaches 1 (or the right-most
    boundary if it never does). The merged box preserves total mass (integral)
    and center of mass, and its support runs from the left-most boundary to
    x_solved.

    :param old_means: (n,) tensor of old means
    :param old_scales: (n,) tensor of old scales (half-widths)
    :param old_amplitudes: (n,) tensor of old amplitudes
    :param old_colors: (n, 3) tensor of old colors
    :param new_means: (n,) tensor of new means
    :param new_scales: (n,) tensor of new scales (half-widths)
    :param new_amplitudes: (n,) tensor of new amplitudes
    :param new_colors: (n, 3) tensor of new colors
    :return: Dictionary with combined means, scales, amplitudes, and colors
    '''

    assert old_means.dim() == 1 and old_scales.dim() == 1 and old_amplitudes.dim() == 1 and old_colors.dim() == 2, "Old parameters must be 1D or 2D tensors."
    assert new_means.dim() == 1 and new_scales.dim() == 1 and new_amplitudes.dim() == 1 and new_colors.dim() == 2, "New parameters must be 1D or 2D tensors."
    assert old_means.size(0) == old_scales.size(0) == old_amplitudes.size(0) == old_colors.size(0), "Old parameters must have the same number of elements."
    assert new_means.size(0) == new_scales.size(0) == new_amplitudes.size(0) == new_colors.size(0), "New parameters must have the same number of elements."
    assert old_means.size(0) > 0 and new_means.size(0) > 0, "Old and new parameters must have at least one element."
    # NOTE(review): the two asserts below duplicate the size checks above — harmless.
    assert old_means.size(0) == old_scales.size(0) == old_amplitudes.size(0) == old_colors.size(0), "Old parameters must have the same number of elements."
    assert new_means.size(0) == new_scales.size(0) == new_amplitudes.size(0) == new_colors.size(0), "New parameters must have the same number of elements."

    # make sure old_xxx and new_xxx have 1 dimension
    # NOTE(review): the dim() asserts above already force these exact shapes,
    # so none of the following squeeze/unsqueeze branches can fire (dead
    # defensive code kept for safety).
    if old_means.dim() == 2: old_means = old_means.squeeze(-1)  # (n)
    if old_scales.dim() == 2: old_scales = old_scales.squeeze(-1)  # (n)
    if old_amplitudes.dim() == 2: old_amplitudes = old_amplitudes.squeeze(-1)  # (n)
    if old_colors.dim() == 1: old_colors = old_colors.unsqueeze(0)  # (n, 3)
    if new_means.dim() == 2: new_means = new_means.squeeze(-1)  # (n)
    if new_scales.dim() == 2: new_scales = new_scales.squeeze(-1)  # (n)
    if new_amplitudes.dim() == 2: new_amplitudes = new_amplitudes.squeeze(-1)  # (n)
    if new_colors.dim() == 1: new_colors = new_colors.unsqueeze(0)  # (n, 3)

    
    # Support boundaries of each uniform box: [mean - scale, mean + scale].
    old_x0 = old_means - old_scales # (n)
    old_x1 = old_means + old_scales # (n)
    new_x0 = new_means - new_scales # (n)
    new_x1 = new_means + new_scales # (n)
    
    # y(x) is the sum of the two ramped cumulative contributions:
    # y(x) = term(old_x0, old_x1, x, old_amplitudes) + term(new_x0, new_x1, x, new_amplitudes)
    # where term(x0, x1, x_eval, amp) = (relu(x_eval - x0) - relu(x_eval - x1)) * amp.
    # We need to solve y(x) = 1. If y(x) < 1 for all x, return max(old_x1, new_x1).

    # 1. Characteristic coordinates are already available: old_x0, old_x1, new_x0, new_x1

    # 2. Create s_pts (sorted characteristic coordinates for each item);
    # y(x) is piecewise linear with breakpoints exactly at these coordinates.
    # Stack all coordinates for each item: (n, 4)
    all_coords = torch.stack([old_x0, old_x1, new_x0, new_x1], dim=-1)
    # Sort them for each item: (n, 4)
    s_pts_transposed, _ = torch.sort(all_coords, dim=-1)
    # Transpose to get s_pts[j, i] as j-th sorted coord for i-th item: (4, n)
    s_pts = s_pts_transposed.transpose(0, 1)

    # 3. Define the term function (vectorized): amp-weighted clamp-ramp of x_eval over [x0, x1].
    def _term_fn(x0, x1, x_eval, amp):
        return (torch.relu(x_eval - x0) - torch.relu(x_eval - x1)) * amp

    # 4. Calculate y(x) at each critical point s_pts[j]
    y_at_s = torch.zeros_like(s_pts)  # Shape (4, n)
    for j in range(4):
        y_at_s[j] = _term_fn(old_x0, old_x1, s_pts[j], old_amplitudes) + \
                      _term_fn(new_x0, new_x1, s_pts[j], new_amplitudes)

    # 5. Define target Y value and epsilon for float comparisons
    Y_TARGET = 1.0
    epsilon = 1e-9  # A small number for float comparisons

    # 6. Initialize result tensor `x_solved` with the fallback value
    # Fallback is max(old_x1, new_x1) if y(x) < 1 for all x.
    x_solved = torch.max(old_x1, new_x1).clone().to(dtype=old_means.dtype, device=old_means.device)
    # Keep track of which items have found a solution
    solved_mask = torch.zeros_like(old_means, dtype=torch.bool)

    # 7. Case: Solution occurs for x < s_pts[0], which is trivially impossible
    # (y is identically 0 left of the first breakpoint). No consideration needed.
    
    # 8. Case: Solution occurs within segments [s_pts[j], s_pts[j+1]].
    # Segments are scanned left to right; the solved_mask keeps the first
    # (smallest-x) crossing per item.
    for j in range(3):  # Iterate through the 3 segments
        y0_seg = y_at_s[j]      # y value at start of segment
        y1_seg = y_at_s[j+1]    # y value at end of segment
        x0_seg = s_pts[j]      # x value at start of segment
        x1_seg = s_pts[j+1]    # x value at end of segment

        # Condition for Y_TARGET to be strictly crossed within the segment
        # and this item has not yet found a solution at a smaller x.
        # NOTE(review): the strict epsilon margins mean an exact touch
        # (y == 1 at a breakpoint) falls through to the fallback — TODO confirm intended.
        qualifies_segment = (~solved_mask) & (y0_seg <= Y_TARGET - epsilon) & (y1_seg >= Y_TARGET + epsilon)
        
        denom = y1_seg - y0_seg
        is_flat_segment = torch.abs(denom) < epsilon

        # Calculate solution candidate for this segment
        # Case 1: Segment is flat — only solvable if y already equals the target.
        solution_candidate_flat = x0_seg 
        update_cond_flat = qualifies_segment & is_flat_segment & (torch.abs(Y_TARGET - y0_seg) < epsilon)
        
        x_solved = torch.where(update_cond_flat, solution_candidate_flat, x_solved)
        solved_mask |= update_cond_flat
        
        # Case 2: Segment is not flat — linear interpolation to the crossing.
        # Use a safe denominator to prevent division by zero.
        safe_denom = torch.where(is_flat_segment, torch.ones_like(denom), denom)
        # Ensure x1_seg - x0_seg is non-negative for safety, though s_pts are sorted.
        seg_length = torch.relu(x1_seg - x0_seg)
        solution_candidate_not_flat = x0_seg + (Y_TARGET - y0_seg) / safe_denom * seg_length
        
        update_cond_not_flat = qualifies_segment & (~is_flat_segment)
        
        x_solved = torch.where(update_cond_not_flat, solution_candidate_not_flat, x_solved)
        solved_mask |= update_cond_not_flat

    # 9. Case: Solution occurs for x > s_pts[3]. simply impossible
    # (y is constant right of the last breakpoint). fallback to initial value is okay.

    # x_solved now contains the required x values.
    # If an item was never solved (e.g., all amplitudes are zero and Y_TARGET is not zero),
    # it retains its initial fallback value max(old_x1, new_x1).
    
    # Mass contributed by each box up to x_solved (clamped to each box's support).
    old_integral = old_amplitudes * (torch.clamp(x_solved, min=old_x0, max=old_x1) - old_x0)
    new_integral = new_amplitudes * (torch.clamp(x_solved, min=new_x0, max=new_x1) - new_x0)
    total_integral = old_integral + new_integral # This should be <= 1

    # The merged box has 3 degrees of freedom. Mass (integral) and position
    # (center of mass) are always preserved; for the third constraint there
    # are two strategies: 1. Preserve volume boundaries. 2. Preserve variance.
    # Here, we use the first strategy, which is more intuitive and easier to implement.

    left_boundary = torch.min(old_x0, new_x0)
    segment_lengths = x_solved - left_boundary  # Length of each segment from the left boundary
    amplitudes = total_integral / torch.clamp(segment_lengths, min = 1e-6)  # Calculate amplitudes based on the integral and segment lengths
    # Centroids of each box's (possibly truncated-at-x_solved) contribution.
    clipped_old_means = (torch.min(old_x1, x_solved) + old_x0) / 2
    clipped_new_means = (torch.min(new_x1, x_solved) + new_x0) / 2
    means = (clipped_old_means * old_integral + clipped_new_means * new_integral) / torch.clamp(total_integral, min=1e-6)  # Weighted average of means based on integrals
    scales = segment_lengths / 2  # Scales are half the segment lengths
    # Colors are averaged similarly to means, weighted by the integrals
    colors = old_integral.unsqueeze(-1) * old_colors + new_integral.unsqueeze(-1) * new_colors
    colors = colors / torch.clamp(total_integral.unsqueeze(-1), min=1e-4)  # Normalize colors by total integral

    return {'means': means, 'scales': scales, 'amplitudes': amplitudes, 'colors': colors}

class PinHoleCamera:
    '''
    A simple pinhole camera: position/look-at/up pose, vertical field of view,
    and helpers to build view/projection matrices and generate per-pixel rays.
    '''
    def __init__(self, position: torch.Tensor, look_at: torch.Tensor, up: torch.Tensor, fov: float, device='cpu'):
        '''
        Initialize the pinhole camera.
        :param position: (3,) tensor of camera position
        :param look_at: (3,) tensor of point the camera is looking at
        :param up: (3,) tensor representing the up direction of the camera
        :param fov: Field of view in degrees (y-axis)
        :param device: Device to use for tensors
        '''
        self.position = position.to(device)
        self.look_at = look_at.to(device)
        self.up = up.to(device)
        self.fov = fov
        self.device = device
        # Fixed clip planes used by the projection matrices below.
        self.near = 0.01
        self.far = 100.0  # Far plane distance

    def get_direction(self) -> torch.Tensor:
        '''
        Return the unit direction vector from the camera position to the look_at point.
        :return: (3,) tensor representing the direction vector
        '''
        direction = self.look_at - self.position
        return direction / torch.norm(direction, dim=-1, keepdim=True)

    def get_view_matrix(self, rhs = True) -> torch.Tensor:
        '''
        Return the view (world-to-camera) matrix for the camera.
        :param rhs: If True (default), build a right-handed view matrix; if
                    False, the forward axis is negated for a left-handed system.
        :return: (4, 4) tensor representing the view matrix
        '''
        # Orthonormal camera basis from forward and the provided up hint.
        # NOTE(review): torch.cross without an explicit dim is deprecated in
        # newer PyTorch — consider torch.linalg.cross / dim=-1.
        forward = (self.look_at - self.position)
        forward = forward / torch.norm(forward, dim=-1, keepdim=True)  # Normalize forward vector
        right = torch.cross(forward, self.up)
        right = right / torch.norm(right, dim=-1, keepdim=True)  # Normalize right vector
        up = torch.cross(right, forward)
        # up is already normalized because right and forward are orthonormal and unit length.
        # up = up / torch.norm(up, dim=-1, keepdim=True)
        
        # Create the view matrix
        view_matrix = torch.eye(4, device=self.device, dtype=torch.float32)
        if not rhs:
            forward = -forward  # Negate forward vector for left-handed coordinate system
        
        # The rotation part of the view matrix (R) should have the camera's basis vectors as rows.
        # This is the transpose of the camera's orientation matrix in the world.
        rot_mat = torch.stack([right, up, -forward], dim=0)
        view_matrix[:3, :3] = rot_mat
        
        # The translation part of the view matrix is -R * camera_position.
        view_matrix[:3, 3] = -rot_mat @ self.position
        return view_matrix
    
    def get_perspective_matrix(self, rhs=False) -> torch.Tensor:
        '''
        Return the perspective projection matrix for the camera.
        Z range is mapped to [0, 1] (Direct3D-style).
        :param rhs: If True, assumes a right-handed view space (like OpenGL, camera looks down -Z).
                    If False (default), assumes a left-handed view space (like Direct3D, camera looks down +Z).
        :return: (4, 4) tensor representing the perspective projection matrix
        '''
        aspect_ratio = 1.0  # Assuming a square image for simplicity
        fov_rad = np.deg2rad(self.fov)
        tan_half_fov = np.tan(fov_rad / 2)
        
        # Create the perspective projection matrix with proper near and far planes
        perspective_matrix = torch.zeros((4, 4), device=self.device, dtype=torch.float32)
        perspective_matrix[0, 0] = 1 / (aspect_ratio * tan_half_fov)
        perspective_matrix[1, 1] = 1 / tan_half_fov
        
        if rhs:
            # Right-handed system (OpenGL-style view space): z from [-far, -near] mapped to [0, 1]
            # Camera looks down -Z. w_clip = -z_view
            perspective_matrix[2, 2] = self.far / (self.near - self.far)
            perspective_matrix[2, 3] = (self.far * self.near) / (self.near - self.far)
            perspective_matrix[3, 2] = -1.0
        else:
            # Left-handed system (Direct3D-style view space): z from [near, far] mapped to [0, 1]
            # Camera looks down +Z. w_clip = z_view
            perspective_matrix[2, 2] = self.far / (self.far - self.near)
            perspective_matrix[2, 3] = -(self.far * self.near) / (self.far - self.near)
            perspective_matrix[3, 2] = 1.0
        
        # perspective_matrix[3, 3] is 0 by default
        
        return perspective_matrix
    
    def get_perspective_view_matrix(self, **kwargs) -> torch.Tensor:
        '''
        Return the combined perspective * view matrix for the camera.
        :param kwargs: forwarded to BOTH get_view_matrix and get_perspective_matrix
                       (so a passed `rhs` flag applies to both).
        NOTE(review): with no kwargs, the defaults mix a right-handed view
        (rhs=True) with a left-handed projection (rhs=False) — TODO confirm
        this combination is intended.
        :return: (4, 4) tensor representing the perspective view matrix
        '''
        view_matrix = self.get_view_matrix(**kwargs)
        perspective_matrix = self.get_perspective_matrix(**kwargs)
        # Combine the two matrices
        return perspective_matrix @ view_matrix

    def generate_rays(self, width: int, height: int) -> RayBatch:
        '''
        Generate one ray per pixel for a width x height image.
        :param width: Width of the image
        :param height: Height of the image
        :return: RayBatch object containing origins and directions (width*height rays)
        '''
        aspect_ratio = width / height
        fov_rad = np.deg2rad(self.fov)
        
        # Calculate camera basis vectors
        forward = (self.look_at - self.position)
        right = torch.cross(forward, self.up)
        up = torch.cross(right, forward)
        # Normalize after all cross products to maintain orthogonality as much as possible
        # given the initial up vector.
        forward = forward / torch.norm(forward, dim=-1, keepdim=True)
        right = right / torch.norm(right, dim=-1, keepdim=True)
        up = up / torch.norm(up, dim=-1, keepdim=True)
        
        # Pixel-center offsets on the image plane at unit focal distance,
        # scaled by tan(fov/2) (x additionally by the aspect ratio).
        pixel_x = (torch.arange(width, device=self.device, dtype=torch.float32) - width / 2 + 0.5) / (width / 2) * np.tan(fov_rad / 2) * aspect_ratio
        pixel_y = (torch.arange(height, device=self.device, dtype=torch.float32) - height / 2 + 0.5) / (height / 2) * np.tan(fov_rad / 2)
        
        # Create a grid of pixel coordinates
        # Use indexing='xy' to ensure row-major order of rays for final reshape
        pixel_x_grid, pixel_y_grid = torch.meshgrid(pixel_x, pixel_y, indexing='xy')
        
        # Calculate ray directions
        # forward, right, up are (3), pixel_x_grid, pixel_y_grid are (H, W)
        # Unsqueeze forward, right, up to be (1, 1, 3) for broadcasting with (H, W, 1)
        ray_directions = (forward.unsqueeze(0).unsqueeze(0) + 
                          pixel_x_grid.unsqueeze(-1) * right.unsqueeze(0).unsqueeze(0) + 
                          pixel_y_grid.unsqueeze(-1) * up.unsqueeze(0).unsqueeze(0))
        ray_directions = ray_directions.reshape(-1, 3)
        # Normalize ray directions
        ray_directions = ray_directions / torch.norm(ray_directions, dim=-1, keepdim=True)

        # All rays share the camera position as origin.
        return RayBatch(self.position.expand(ray_directions.size(0), -1), ray_directions)

def ray_intersect_with_spheres (rays: RayBatch, primitives: UniformVolumePrimitives) -> tuple[torch.Tensor, torch.Tensor]:
    '''
    Intersect rays with spheres defined by the primitives.
    Each primitive is a unit sphere deformed by a rotation R and a per-axis scale S;
    rays are mapped into each sphere's local frame (where it is a unit sphere) and
    the standard quadratic ray/sphere test is solved there.
    :param rays: RayBatch object containing origins and directions
    :param primitives: UniformVolumePrimitives object defining the volume
    :return: (n, num_spheres) tensor of valid mask, (n, num_spheres, 2) tensor of intersection distances for each ray, where each row contains (t_near, t_far)
    '''
    num_rays = rays.num_rays
    num_spheres = primitives.num_spheres
    device = rays.origins.device

    # World-to-local transform per sphere: (R @ S)^(-1) = S^(-1) @ R^T.
    # Built directly from the inverse scale and transposed rotation to avoid torch.inverse.
    inv_scale = build_scale_matrices(1 / primitives.scales)
    rot_transposed = build_rotation_matrices(primitives.rotations).transpose(1, 2)
    world_to_local = torch.bmm(inv_scale, rot_transposed)  # (num_spheres, 3, 3)

    # Translate ray origins relative to each sphere center, then apply the
    # rotation/scale part of the transform to both origins and directions.
    centered_origins = rays.origins.unsqueeze(1) - primitives.centers.unsqueeze(0)  # (n, num_spheres, 3)
    world_to_local = world_to_local.unsqueeze(0).expand(num_rays, num_spheres, 3, 3).reshape(-1, 3, 3)  # (n * num_spheres, 3, 3)
    origins_local = torch.bmm(world_to_local, centered_origins.reshape(-1, 3, 1))  # (n*num_spheres, 3, 1)
    origins_local = origins_local.squeeze(2).reshape(num_rays, num_spheres, 3)
    dirs_expanded = rays.directions.unsqueeze(1).expand(num_rays, num_spheres, 3)  # (n, num_spheres, 3)
    dirs_local = torch.bmm(world_to_local, dirs_expanded.reshape(-1, 3, 1))  # (n*num_spheres, 3, 1)
    dirs_local = dirs_local.squeeze(2).reshape(num_rays, num_spheres, 3)

    # Unit-sphere intersection: |o + t*d|^2 = 1  =>  a*t^2 + b*t + c = 0.
    a = torch.sum(dirs_local**2, dim=-1)                       # |d|^2, (n, num_spheres)
    b = 2 * torch.sum(origins_local * dirs_local, dim=-1)      # 2*dot(d, o), (n, num_spheres)
    c = torch.sum(origins_local**2, dim=-1) - 1.0              # |o|^2 - 1, (n, num_spheres)
    discriminant = b**2 - 4 * a * c  # (n, num_spheres)

    # A non-negative discriminant means the (infinite) ray line hits the sphere;
    # tangential grazes (discriminant == 0) count as hits with t_near == t_far.
    hit_mask = discriminant >= 0  # (n, num_spheres)
    t_near = torch.zeros((num_rays, num_spheres), device=device)
    t_far = torch.zeros((num_rays, num_spheres), device=device)
    if hit_mask.any():
        root = torch.sqrt(discriminant[hit_mask])
        t_near[hit_mask] = (-b[hit_mask] - root) / (2 * a[hit_mask])
        t_far[hit_mask] = (-b[hit_mask] + root) / (2 * a[hit_mask])
    # Entries where hit_mask is False keep t_near == t_far == 0 and must be ignored by callers.
    return hit_mask, torch.stack([t_near, t_far], dim=-1)  # (n, num_spheres), (n, num_spheres, 2)

# @torch.compile
def forward_render_sorting_for_uniform_primitives (camera : PinHoleCamera, width: int, height: int, primitives : UniformVolumePrimitives) -> torch.Tensor:
    '''
    Forward render a camera through the uniform volume primitives defined by the primitives.
    (Render with sorting and combining primitives.)
    Each ray carries a running 1D uniform colored distribution along its parameter t,
    described by (mean, scale=half-width, amplitude, color); primitives are folded in
    one at a time, either initializing the distribution or combining with it.
    :param camera: PinHoleCamera object
    :param width: Width of the image
    :param height: Height of the image
    :param primitives: UniformVolumePrimitives object defining the volume
    :return: (n, 4) tensor of rendered values (color and opacity)
    '''
    rays = camera.generate_rays(width, height)
    # Intersect rays with spheres
    valid_mask, intersection_distances = ray_intersect_with_spheres(rays, primitives) # (n, num_spheres), (n, num_spheres, 2)
    # Initialize rendered values (1d uniform colored distributions)
    means = torch.zeros(rays.num_rays, device=default_device)
    scales = torch.zeros(rays.num_rays, device=default_device)
    amplitudes = torch.zeros(rays.num_rays, device=default_device)
    colors = torch.zeros((rays.num_rays, 3), device=default_device)
    valid = torch.zeros(rays.num_rays, dtype=torch.bool, device=default_device)
    # Per pixel merging
    for i in range(primitives.num_spheres):
        # consider the effect of the i-th primitive
        new_valid_mask = valid_mask[:, i]
        old_valid_mask = valid
        # Clamp the intersection interval to the forward half-line (t >= 0) so geometry
        # behind the camera never contributes, then convert it to (mean, half-width) form.
        # Both the copy and combine branches below use this same parameterization.
        t_enter = torch.clamp(intersection_distances[:, i, 0], min=0.0)  # (n,)
        t_exit = torch.clamp(intersection_distances[:, i, 1], min=0.0)  # (n,)
        interval_means = (t_enter + t_exit) / 2   # (n,)
        interval_scales = (t_exit - t_enter) / 2  # (n,)
        # 1. For each currently invalid distribution, if the new distribution is valid, copy it.
        # FIX: this branch previously stored the *unclamped entry distance* as the mean and the
        # primitive's x-axis scale as the half-width, which disagreed with the combine branch
        # (interval midpoint / half-width of the clamped interval) and let spheres behind the
        # camera contribute. It now uses the same interval parameterization as the combine branch.
        should_copy_mask = ~old_valid_mask & new_valid_mask
        means[should_copy_mask] = interval_means[should_copy_mask]
        scales[should_copy_mask] = interval_scales[should_copy_mask]
        amplitudes[should_copy_mask] = primitives.opacities[i, 0]
        colors[should_copy_mask] = primitives.colors[i, :]
        valid[should_copy_mask] = True
        # 2. For each currently valid distribution, if the new distribution is valid, combine them.
        should_combine_mask = old_valid_mask & new_valid_mask
        if should_combine_mask.any():
            indices = torch.where(should_combine_mask)[0]
            old_means = means[indices]
            old_scales = scales[indices]
            old_amplitudes = amplitudes[indices]
            old_colors = colors[indices, :]
            new_means = interval_means[indices]   # (k,) where k is the number of indices
            new_scales = interval_scales[indices] # (k,) where k is the number of indices
            # The features of the new distributions are coming from the i-th primitive. Just duplicate them to match the number of indices.
            new_amplitudes = primitives.opacities[i, 0].unsqueeze(0).expand_as(old_amplitudes)  # (k,)
            new_colors = primitives.colors[i, :].unsqueeze(0).expand_as(old_colors)  # (k, 3)
            # Only combine where the new interval has positive extent (fully-behind-camera or
            # tangential hits collapse to zero width and carry no volume).
            indices_2 = torch.where(new_scales > 0)[0]
            if indices_2.numel() != 0:
                # Checkpoint to trade recomputation for activation memory in the backward pass.
                combined = checkpoint(combine_uniform_distributions,
                                    old_means[indices_2], old_scales[indices_2], old_amplitudes[indices_2], old_colors[indices_2],
                                    new_means[indices_2], new_scales[indices_2], new_amplitudes[indices_2], new_colors[indices_2],
                                    use_reentrant=False)
                means[indices[indices_2]] = combined['means']
                scales[indices[indices_2]] = combined['scales']
                amplitudes[indices[indices_2]] = combined['amplitudes']
                colors[indices[indices_2], :] = combined['colors']

    # Okay, we have the means, scales, amplitudes, and colors for each ray.
    # Compute color and opacity now.
    # In linear scattering media, the opacity is simply the integral.
    # And the color is simply the color of the final distribution.
    rendered_values = torch.zeros((rays.num_rays, 4), device=default_device)
    rendered_colors = colors * (amplitudes * scales).unsqueeze(-1) # Color weighted by the distribution's integral
    rendered_opacities = amplitudes * scales  # Opacity is the integral of the final distribution
    rendered_values[:, :3] = rendered_colors
    rendered_values[:, 3] = rendered_opacities
    return rendered_values  # (n, 4) tensor of rendered values (color and opacity)


def render_scene(camera: PinHoleCamera, primitives: VolumePrimitives, width: int, height: int) -> torch.Tensor:
    '''
    Render the scene from the camera's perspective.
    Dispatches on the primitive type: uniform volume primitives use the sorting/combining
    renderer; anything else falls back to ray marching.
    :param camera: PinHoleCamera object
    :param primitives: VolumePrimitives object defining the volume
    :param width: Width of the image
    :param height: Height of the image
    :return: (height, width, 4) tensor of rendered image (color and opacity)
    '''
    # torch.autograd.set_detect_anomaly(True)
    if isinstance(primitives, UniformVolumePrimitives):
        # The sorting renderer generates its own rays internally, so don't build a
        # (previously discarded) ray batch here — that was a full extra W*H ray pass.
        rendered_values = forward_render_sorting_for_uniform_primitives(camera, width, height, primitives)
    else:
        # Only the ray-marching path consumes an explicit ray batch.
        rays = camera.generate_rays(width, height)
        rendered_values = forward_render_raymarching(rays, primitives)

    return rendered_values.reshape(height, width, 4)  # Reshape to (height, width, 4) for image output