# Copyright (c) Facebook, Inc. and its affiliates.
# 
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
    division,
    absolute_import,
    with_statement,
    print_function,
    unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys

try:
    import builtins
except:
    import __builtin__ as builtins

# try:
#     import pointnet2._ext as _ext
# except ImportError:
#     if not getattr(builtins, "__POINTNET2_SETUP__", False):
#         raise ImportError(
#             "Could not import _ext module.\n"
#             "Please see the setup instructions in the README: "
#             "https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
#         )

if False:
    # Workaround for type hints without depending on the `typing` module
    from typing import *

def ext_gather_points(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    """
    Gather points from a point cloud by index.

    Parameters:
    points (torch.Tensor): input point features, shape (b, c, n)
    idx (torch.Tensor): indices of the points to gather, shape (b, m)

    Returns:
    torch.Tensor: gathered points, shape (b, c, m)
    """
    b, c, n = points.size()
    # torch.gather requires int64 indices; callers sometimes carry int32
    # (see GroupingOperation, which converts explicitly before grouping).
    idx = idx.to(torch.int64)
    # Broadcast the (b, m) index across the channel dimension -> (b, c, m).
    idx = idx.unsqueeze(1).expand(-1, c, -1)
    out = torch.gather(points, 2, idx)
    return out

def ext_gather_points_grad(grad_out: torch.Tensor, idx: torch.Tensor, n: int) -> torch.Tensor:
    """
    Gradient of the gather_points operation.

    Parameters:
    grad_out (torch.Tensor): gradient w.r.t. the output, shape (b, c, m)
    idx (torch.Tensor): indices used in the forward pass, shape (b, m)
    n (int): number of points in the forward input

    Returns:
    torch.Tensor: gradient w.r.t. the input, shape (b, c, n)
    """
    b, c, m = grad_out.size()
    grad_points = torch.zeros((b, c, n), device=grad_out.device, dtype=grad_out.dtype)
    grad_out = grad_out.contiguous()
    # scatter_add_ requires int64 indices; broadcast the (b, m) index across
    # the channel dimension to match grad_out's rank.
    idx = idx.to(torch.int64).unsqueeze(1).expand(-1, c, -1)  # (b, c, m)
    # Accumulate (duplicate indices must sum their contributions).
    grad_points.scatter_add_(2, idx, grad_out)
    return grad_points

# def furthest_point_sampling(points, nsamples):
#     """
#     最远点采样。
    
#     参数:
#     points (torch.Tensor): 输入点云，形状为 (b, n, 3)
#     nsamples (int): 采样点的数量
    
#     返回:
#     torch.Tensor: 采样点的索引，形状为 (b, nsamples)
#     """
#     b, n, _ = points.size()
#     idxs = torch.zeros((b, nsamples), dtype=torch.long, device=points.device)
#     temp = torch.full((b, n), 1e10, device=points.device)

#     batch_indices = torch.arange(b, dtype=torch.long, device=points.device).unsqueeze(-1)
#     idxs[:, 0] = 0  # 初始点
#     old = idxs[:, 0]

#     for i in range(1, nsamples):
#         # 计算当前点到所有点的距离
#         old_points = points[batch_indices, old].unsqueeze(1)  # (b, 1, 3)
#         dist = torch.sum((points - old_points) ** 2, dim=-1)  # (b, n)
#         dist = torch.min(dist, temp)  # 更新最小距离
#         temp = dist
#         # 找到最远点
#         _, besti = torch.max(dist, dim=1)
#         idxs[:, i] = besti
#         old = besti

#     return idxs
# @torch.jit.script
def ext_furthest_point_sampling(points: torch.Tensor, nsamples: int) -> torch.Tensor:
    """
    Iterative furthest point sampling.

    Parameters:
    points (torch.Tensor): input point cloud, shape (b, n, 3)
    nsamples (int): number of points to sample

    Returns:
    torch.Tensor: indices of the sampled points, shape (b, nsamples)
    """
    batch, num_points, _ = points.size()
    device = points.device
    sampled = torch.zeros((batch, nsamples), dtype=torch.long, device=device)
    # Running minimum distance from every point to the already-sampled set.
    nearest = torch.full((batch, num_points), 1e10, device=device)
    sampled[:, 0] = 0  # seed the sample set with the first point

    for step in range(1, nsamples):
        # Coordinates of the most recently selected point, per batch.
        prev = sampled[:, step - 1].view(batch, 1, 1).expand(-1, 1, 3)
        anchor = torch.gather(points, 1, prev)            # (b, 1, 3)
        # Euclidean distance from the anchor to every point.
        cur = torch.cdist(anchor, points).squeeze(1)      # (b, n)
        nearest = torch.min(nearest, cur)
        # The next sample is the point furthest from the sampled set.
        _, farthest = torch.max(nearest, dim=1)
        sampled[:, step] = farthest
    return sampled
# def ext_furthest_point_sampling(points, nsamples):
#     """
#     最远点采样。
    
#     参数:
#     points (torch.Tensor): 输入点云，形状为 (b, n, 3)
#     nsamples (int): 采样点的数量
    
#     返回:
#     torch.Tensor: 采样点的索引，形状为 (b, nsamples)
#     """
#     b, n, _ = points.size()
#     idxs = torch.zeros((b, nsamples), dtype=torch.long, device=points.device)
#     temp = torch.full((b, n), 1e10, device=points.device)

#     batch_indices = torch.arange(b, dtype=torch.long, device=points.device).unsqueeze(-1)
#     idxs[:, 0] = 0  # 初始点
#     old = idxs[:, 0]

#     for i in range(1, nsamples):
#         # 计算当前点到所有点的距离
#         old_points = points[batch_indices, old].unsqueeze(1)  # (b, 1, 3)
#         dist = torch.sum((points - old_points) ** 2, dim=-1)  # (b, n)
#         dist = torch.min(dist, temp)  # 更新最小距离
#         temp = dist
#         # 找到最远点
#         _, besti = torch.max(dist, dim=1)
#         idxs[:, i] = besti
#         old = besti

#     return idxs

class RandomDropout(nn.Module):
    """Feature dropout whose probability is redrawn uniformly from [0, p) on every forward pass."""

    def __init__(self, p=0.5, inplace=False):
        super(RandomDropout, self).__init__()
        self.p = p          # upper bound of the dropout probability
        self.inplace = inplace

    def forward(self, X):
        # Draw this pass's dropout probability.
        theta = torch.Tensor(1).uniform_(0, self.p)[0]
        # BUG FIX: pass the module's training flag (`self.training`), not the
        # bound `nn.Module.train` method — a method is always truthy, so the
        # old code behaved as if training were permanently enabled.
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)


class FurthestPointSampling(Function):
    @staticmethod
    def forward(ctx, xyz, npoint):
        # type: (Any, torch.Tensor, int) -> torch.Tensor
        r"""
        Uses iterative furthest point sampling to select a set of npoint features that have the largest
        minimum distance

        Parameters
        ----------
        xyz : torch.Tensor
            (B, N, 3) tensor where N > npoint
        npoint : int32
            number of features in the sampled set

        Returns
        -------
        torch.Tensor
            (B, npoint) tensor containing the set
        """
        # Pure-PyTorch replacement for the original _ext CUDA kernel.
        # return _ext.furthest_point_sampling(xyz, npoint)
        return ext_furthest_point_sampling(xyz, npoint)


    @staticmethod
    def backward(xyz, a=None):
        # Sampling produces integer indices, so it is not differentiable.
        # NOTE(review): the first parameter here is the autograd context
        # (conventionally named `ctx`), misleadingly named `xyz`; it is unused.
        return None, None


# furthest_point_sample = FurthestPointSampling.apply

# The module exposes the plain function instead of the autograd Function
# wrapper above, so FurthestPointSampling.apply is currently unused.
furthest_point_sample = ext_furthest_point_sampling

class GatherOperation(Function):
    @staticmethod
    def forward(ctx, features, idx):
        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Gather feature columns by index.

        Parameters
        ----------
        features : torch.Tensor
            (B, C, N) tensor

        idx : torch.Tensor
            (B, npoint) tensor of the features to gather

        Returns
        -------
        torch.Tensor
            (B, C, npoint) tensor
        """

        _, C, N = features.size()

        # Save the indices plus the original point count so backward can
        # scatter the gradient back to shape (B, C, N).
        ctx.for_backwards = (idx, C, N)
        # return _ext.gather_points(features, idx)
        return ext_gather_points(features, idx)

    @staticmethod
    def backward(ctx, grad_out):
        idx, C, N = ctx.for_backwards

        # Route the output gradient back to the gathered positions; idx is
        # integer-valued and receives no gradient.
        # grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
        grad_features = ext_gather_points_grad(grad_out.contiguous(), idx, N)

        return grad_features, None


gather_operation = GatherOperation.apply

def ext_three_nn(unknowns: torch.Tensor, knows: torch.Tensor) -> tuple:
    """
    For each query point, find its three nearest reference points.

    Parameters:
    unknowns (torch.Tensor): query points, shape (b, n, 3)
    knows (torch.Tensor): reference points, shape (b, m, 3)

    Returns:
    tuple: squared distances (b, n, 3) and indices (b, n, 3)
    """
    # Pairwise squared distances via broadcasting:
    # (b, n, 1, 3) - (b, 1, m, 3) -> (b, n, m, 3).
    delta = unknowns.unsqueeze(2) - knows.unsqueeze(1)
    sq_dist = (delta * delta).sum(dim=-1)  # (b, n, m)
    # Three smallest distances per query point, together with their indices.
    d2, nn_idx = torch.topk(sq_dist, 3, dim=-1, largest=False)  # (b, n, 3)
    return d2, nn_idx

def ext_three_interpolate(points: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    """
    Weighted interpolation from the three nearest points.

    Parameters:
    points (torch.Tensor): point features, shape (b, c, m)
    idx (torch.Tensor): indices of the 3 nearest points, shape (b, n, 3)
    weight (torch.Tensor): interpolation weights, shape (b, n, 3)

    Returns:
    torch.Tensor: interpolated features, shape (b, c, n)
    """
    b, c, m = points.size()
    n = idx.size(1)
    # Flatten the (n, 3) index grid and gather directly from (b, c, m),
    # instead of materialising a (b, c, m, n, 3) expansion of `points`
    # as before — same result, O(b*c*n*3) memory instead of O(b*c*m*n*3).
    flat_idx = idx.to(torch.int64).reshape(b, 1, n * 3).expand(-1, c, -1)  # (b, c, n*3)
    gathered = torch.gather(points, 2, flat_idx).view(b, c, n, 3)          # (b, c, n, 3)
    # Weighted sum over the 3 neighbours (weight broadcast over channels).
    out = (gathered * weight.unsqueeze(1)).sum(dim=-1)  # (b, c, n)
    return out

def ext_three_interpolate_grad(grad_out: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor, m: int) -> torch.Tensor:
    """
    Gradient of the three_interpolate operation.

    Parameters:
    grad_out (torch.Tensor): gradient w.r.t. the output, shape (b, c, n)
    idx (torch.Tensor): indices of the 3 nearest points, shape (b, n, 3)
    weight (torch.Tensor): interpolation weights, shape (b, n, 3)
    m (int): number of points in the forward input

    Returns:
    torch.Tensor: gradient w.r.t. the input features, shape (b, c, m)
    """
    b, c, n = grad_out.size()
    # Per-neighbour contribution: dL/dout scaled by the interpolation weight.
    grad_contrib = grad_out.unsqueeze(-1) * weight.unsqueeze(1)  # (b, c, n, 3)
    grad_points = torch.zeros((b, c, m), device=grad_out.device, dtype=grad_out.dtype)
    # BUG FIX: scatter_add_ requires index/src with the same rank as self;
    # the old code passed a 4-d index into a 3-d tensor and raised a
    # RuntimeError. Flatten the trailing (n, 3) dims to (n*3) first.
    flat_idx = idx.to(torch.int64).reshape(b, 1, n * 3).expand(-1, c, -1)
    grad_points.scatter_add_(2, flat_idx, grad_contrib.reshape(b, c, n * 3))
    return grad_points


class ThreeNN(Function):
    @staticmethod
    def forward(ctx, unknown, known):
        # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""
            Find the three nearest neighbors of unknown in known
        Parameters
        ----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the query points
        known : torch.Tensor
            (B, m, 3) tensor of the reference points searched for neighbors

        Returns
        -------
        dist : torch.Tensor
            (B, n, 3) l2 distance to the three nearest neighbors
        idx : torch.Tensor
            (B, n, 3) index of 3 nearest neighbors
        """
        # ext_three_nn returns squared distances; take the root so the output
        # matches the original CUDA op, which returned L2 distances.
        # dist2, idx = _ext.three_nn(unknown, known)
        dist2, idx = ext_three_nn(unknown, known)
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is not differentiable.
        return None, None


three_nn = ThreeNN.apply


class ThreeInterpolate(Function):
    @staticmethod
    def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
            Performs weighted linear interpolation on 3 features
        Parameters
        ----------
        features : torch.Tensor
            (B, c, m) Features descriptors to be interpolated from
        idx : torch.Tensor
            (B, n, 3) three nearest neighbors of the target features in features
        weight : torch.Tensor
            (B, n, 3) weights

        Returns
        -------
        torch.Tensor
            (B, c, n) tensor of the interpolated features
        """
        B, c, m = features.size()
        n = idx.size(1)

        # Save the indices, weights and source point count m so backward can
        # scatter the gradient back to shape (B, c, m).
        ctx.three_interpolate_for_backward = (idx, weight, m)

        # return _ext.three_interpolate(features, idx, weight)
        return ext_three_interpolate(features, idx, weight)

    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        r"""
        Parameters
        ----------
        grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs

        Returns
        -------
        grad_features : torch.Tensor
            (B, c, m) tensor with gradients of features

        None

        None
        """
        idx, weight, m = ctx.three_interpolate_for_backward

        # Only the features receive a gradient; idx and weight do not.
        # grad_features = _ext.three_interpolate_grad(
        #     grad_out.contiguous(), idx, weight, m
        # )

        grad_features = ext_three_interpolate_grad(
            grad_out.contiguous(), idx, weight, m
        )

        return grad_features, None, None


three_interpolate = ThreeInterpolate.apply

# def ext_group_points(points, idx):
#     """
#     根据索引对特征进行分组操作。

#     参数:
#     points (torch.Tensor): 输入特征，形状为 (B, C, N)
#     idx (torch.Tensor): 索引，形状为 (B, npoint, nsample)

#     返回:
#     torch.Tensor: 分组后的特征，形状为 (B, C, npoint, nsample)
#     """
#     B, C, N = points.size()
#     npoint, nsample = idx.size(1), idx.size(2)
#     # 扩展索引维度以匹配特征维度
#     idx_expanded = idx.unsqueeze(1).expand(-1, C, -1, -1)  # (B, C, npoint, nsample)
#     # 使用 gather 函数根据索引收集特征
#     grouped_points = torch.gather(points, 2, idx_expanded)
#     return grouped_points

def ext_group_points(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    """
    Group features by index.

    Parameters:
    points (torch.Tensor): input features, shape (B, C, N)
    idx (torch.Tensor): grouping indices, shape (B, npoint, nsample)

    Returns:
    torch.Tensor: grouped features, shape (B, C, npoint, nsample)
    """
    B, C, N = points.size()
    npoint, nsample = idx.size(1), idx.size(2)
    # Clamp out-of-range indices (e.g. the -1 padding produced by ball_query)
    # into [0, N-1], and cast to int64 as required by torch.gather.
    idx = torch.clamp(idx, 0, N - 1).to(torch.int64)
    # Flatten the (npoint, nsample) grid and gather straight from (B, C, N),
    # instead of expanding points to (B, C, N, npoint, nsample) as before —
    # same result without the enormous intermediate tensor.
    flat_idx = idx.reshape(B, 1, npoint * nsample).expand(-1, C, -1)  # (B, C, npoint*nsample)
    grouped_points = torch.gather(points, 2, flat_idx).view(B, C, npoint, nsample)
    return grouped_points

def ext_group_points_grad(grad_out: torch.Tensor, idx: torch.Tensor, n: int) -> torch.Tensor:
    """
    Gradient of the group_points operation.

    Parameters:
    grad_out (torch.Tensor): gradient w.r.t. the output, shape (B, C, npoint, nsample)
    idx (torch.Tensor): grouping indices, shape (B, npoint, nsample)
    n (int): number of points in the forward input

    Returns:
    torch.Tensor: gradient w.r.t. the input features, shape (B, C, n)
    """
    B, C, npoint, nsample = grad_out.size()
    grad_points = torch.zeros((B, C, n), device=grad_out.device, dtype=grad_out.dtype)
    # BUG FIX: scatter_add_ requires index/src with the same rank as self;
    # the old code passed a 4-d index into a 3-d tensor and raised a
    # RuntimeError. Flatten the trailing (npoint, nsample) dims first.
    flat_idx = idx.to(torch.int64).reshape(B, 1, npoint * nsample).expand(-1, C, -1)
    grad_points.scatter_add_(2, flat_idx, grad_out.reshape(B, C, npoint * nsample))
    return grad_points

class GroupingOperation(Function):
    @staticmethod
    def forward(ctx, features, idx):
        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Group feature columns into (npoint, nsample) neighborhoods by index.

        Parameters
        ----------
        features : torch.Tensor
            (B, C, N) tensor of features to group
        idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indicies of features to group with

        Returns
        -------
        torch.Tensor
            (B, C, npoint, nsample) tensor
        """
        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()

        # Save the indices and the point count N so backward can scatter the
        # gradient back to shape (B, C, N).
        ctx.for_backwards = (idx, N)

        # torch.gather (used inside ext_group_points) requires int64 indices.
        # return _ext.group_points(features, idx)
        idx = idx.to(torch.int64)
        return ext_group_points(features, idx)


    @staticmethod
    def backward(ctx, grad_out):
        # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
        r"""

        Parameters
        ----------
        grad_out : torch.Tensor
            (B, C, npoint, nsample) tensor of the gradients of the output from forward

        Returns
        -------
        torch.Tensor
            (B, C, N) gradient of the features
        None
        """
        idx, N = ctx.for_backwards

        # Only the features receive a gradient; the integer indices do not.
        # grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
        grad_features = ext_group_points_grad(grad_out.contiguous(), idx, N)


        return grad_features, None


grouping_operation = GroupingOperation.apply
# @torch.jit.script
# def ext_ball_query(radius:float, nsample:int, xyz:torch.Tensor, new_xyz:torch.Tensor) -> torch.Tensor:

# # def ext_ball_query(new_xyz:torch.Tensor, xyz:torch.Tensor, radius:float, nsample:int) -> torch.Tensor:
#     """
#     在给定点云中查询每个点的球形邻域内的点。

#     参数:
#     new_xyz (torch.Tensor): 形状为 (B, M, 3) 的查询点坐标张量
#     xyz (torch.Tensor): 形状为 (B, N, 3) 的输入点云坐标张量
#     radius (float): 球形邻域的半径
#     nsample (int): 每个查询点最多采样的点数

#     返回:
#     torch.Tensor: 形状为 (B, M, nsample) 的索引张量，存储每个查询点邻域内点的索引
#     """
#     assert new_xyz.is_contiguous() and xyz.is_contiguous(), "Input tensors must be contiguous"
#     assert new_xyz.dtype == torch.float32 and xyz.dtype == torch.float32, "Input tensors must be float32"

#     B, M, _ = new_xyz.size()
#     N = xyz.size(1)

#     # 初始化索引张量
#     idx = torch.full((B, M, nsample), -1, dtype=torch.long, device=new_xyz.device)

#     # 扩展维度以进行广播计算
#     new_xyz_expanded = new_xyz.unsqueeze(2).expand(-1, -1, N, -1)  # (B, M, N, 3)
#     xyz_expanded = xyz.unsqueeze(1).expand(-1, M, -1, -1)  # (B, M, N, 3)

#     # 计算距离平方
#     dist2 = torch.sum((new_xyz_expanded - xyz_expanded) ** 2, dim=-1)  # (B, M, N)
#     radius2 = radius * radius

#     # 获取距离小于半径平方的点的索引
#     mask = dist2 < radius2  # (B, M, N)

#     for b in range(B):
#         for m in range(M):
#             valid_indices = torch.nonzero(mask[b, m]).squeeze(-1)
#             num_valid = min(valid_indices.numel(), nsample)
#             if num_valid > 0:
#                 idx[b, m, :num_valid] = valid_indices[:num_valid]
#     return idx
# def ext_ball_query(radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
# # def ext_ball_query(new_xyz, xyz, radius, nsample):
#     """
#     在给定点云中查询每个点的球形邻域内的点。

#     参数:
#     new_xyz (torch.Tensor): 形状为 (B, M, 3) 的查询点坐标张量
#     xyz (torch.Tensor): 形状为 (B, N, 3) 的输入点云坐标张量
#     radius (float): 球形邻域的半径
#     nsample (int): 每个查询点最多采样的点数

#     返回:
#     torch.Tensor: 形状为 (B, M, nsample) 的索引张量，存储每个查询点邻域内点的索引
#     """
#     assert new_xyz.is_contiguous() and xyz.is_contiguous(), "Input tensors must be contiguous"
#     assert new_xyz.dtype == torch.float32 and xyz.dtype == torch.float32, "Input tensors must be float32"

#     B, M, _ = new_xyz.size()
#     N = xyz.size(1)

#     # 扩展维度以进行广播计算
#     new_xyz_expanded = new_xyz.unsqueeze(2).expand(-1, -1, N, -1)  # (B, M, N, 3)
#     xyz_expanded = xyz.unsqueeze(1).expand(-1, M, -1, -1)  # (B, M, N, 3)

#     # 计算距离平方
#     dist2 = torch.sum((new_xyz_expanded - xyz_expanded) ** 2, dim=-1)  # (B, M, N)
#     radius2 = radius * radius

#     # 标记距离小于半径平方的点
#     valid_mask = dist2 < radius2
#     # 将无效距离设为一个很大的值，避免被选中
#     dist2 = torch.where(valid_mask, dist2, torch.full_like(dist2, 1e10))

#     # 获取每个查询点的最近 nsample 个点的距离和索引
#     dist2_topk, idx = torch.topk(dist2, nsample, dim=-1, largest=False)
#     # 处理没有足够有效点的情况
#     # idx[dist2_topk >= 1e10] = -1

#     return idx

def ext_ball_query(radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
    """
    Ball query: for each query point, return the indices of up to `nsample`
    input points within `radius`, in original point order; unused slots are -1.

    Parameters:
    radius (float): search radius
    nsample (int): maximum number of neighbours per query point
    xyz (torch.Tensor): input point cloud, shape (B, N, 3)
    new_xyz (torch.Tensor): query points, shape (B, M, 3)

    Returns:
    torch.Tensor: neighbour indices, shape (B, M, nsample); invalid slots are -1
    """
    assert new_xyz.is_contiguous() and xyz.is_contiguous(), "输入张量必须连续存储"
    assert new_xyz.dtype == torch.float32 and xyz.dtype == torch.float32, "输入张量必须为 float32 类型"

    B, M, _ = new_xyz.size()
    N = xyz.size(1)

    # Pairwise squared distances via broadcasting (no explicit expand): (B, M, N).
    diff = xyz.unsqueeze(1) - new_xyz.unsqueeze(2)  # (B, M, N, 3)
    dist2 = (diff * diff).sum(dim=-1)
    valid = dist2 < radius * radius  # (B, M, N), True = inside the ball

    # Keep only the first `nsample` in-radius points per query (original order,
    # not distance order — matching the previous implementation's semantics).
    keep = (torch.cumsum(valid.int(), dim=2) <= nsample) & valid  # (B, M, N)

    # Candidate indices: original point index where kept, -1 elsewhere.
    arange = torch.arange(N, device=xyz.device).view(1, 1, N).expand(B, M, N)
    candidates = torch.where(keep, arange, torch.full_like(arange, -1))

    # BUG FIX: the previous topk-on-a-float-0/1-mask trick returned the
    # selected slots in an unspecified order among ties; a stable descending
    # sort keeps the kept indices in their original point order, followed by
    # the -1 padding.
    order = torch.sort(keep.int(), dim=2, descending=True, stable=True).indices
    idx = torch.gather(candidates, 2, order[..., :nsample])  # (B, M, nsample)
    return idx

class BallQuery(Function):
    @staticmethod
    def forward(ctx, radius, nsample, xyz, new_xyz):
        # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Find points within a ball of `radius` around each query center.

        Parameters
        ----------
        radius : float
            radius of the balls
        nsample : int
            maximum number of features in the balls
        xyz : torch.Tensor
            (B, N, 3) xyz coordinates of the features
        new_xyz : torch.Tensor
            (B, npoint, 3) centers of the ball query

        Returns
        -------
        torch.Tensor
            (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        # BUG FIX: ext_ball_query's signature is (radius, nsample, xyz,
        # new_xyz); the previous call passed (new_xyz, xyz, radius, nsample),
        # which matched the old _ext.ball_query argument order instead.
        # return _ext.ball_query(new_xyz, xyz, radius, nsample)
        return ext_ball_query(radius, nsample, xyz, new_xyz)

    @staticmethod
    def backward(ctx, a=None):
        # Ball query is an index selection and is not differentiable.
        return None, None, None, None


# ball_query = BallQuery.apply

# The module exposes the pure-PyTorch function directly, bypassing the
# autograd Function wrapper above.
ball_query = ext_ball_query


class QueryAndGroup(nn.Module):
    r"""
    Groups with a ball query of radius

    Parameters
    ---------
    radius : float32
        Radius of ball
    nsample : int32
        Maximum number of features to gather in the ball
    use_xyz : bool
        Concatenate the recentered xyz coordinates to the grouped features
    ret_grouped_xyz : bool
        Additionally return the grouped xyz coordinates
    normalize_xyz : bool
        Divide the recentered coordinates by the query radius
    sample_uniformly : bool
        Re-draw duplicate ball-query indices uniformly from the unique ones
    ret_unique_cnt : bool
        Additionally return per-ball unique-point counts (requires sample_uniformly)
    """

    def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
        # type: (QueryAndGroup, float, int, bool) -> None
        super(QueryAndGroup, self).__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
        self.normalize_xyz = normalize_xyz
        self.sample_uniformly = sample_uniformly
        self.ret_unique_cnt = ret_unique_cnt
        if self.ret_unique_cnt:
            # unique_cnt is only computed inside the sample_uniformly branch.
            assert(self.sample_uniformly)

    def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            centriods (B, npoint, 3)
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, 3 + C, npoint, nsample) tensor
        """
        # (B, npoint, nsample) indices of in-radius points; ext_ball_query
        # pads unused slots with -1.
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)

        if self.sample_uniformly:
            unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
            for i_batch in range(idx.shape[0]):
                for i_region in range(idx.shape[1]):
                    unique_ind = torch.unique(idx[i_batch, i_region, :])
                    num_unique = unique_ind.shape[0]
                    unique_cnt[i_batch, i_region] = num_unique
                    # Pad back to nsample by re-drawing from the unique indices.
                    # NOTE(review): torch.randint raises when num_unique == 0,
                    # and any -1 padding from ball_query counts as a "unique"
                    # index here — confirm this matches the intended behavior.
                    sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
                    all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
                    idx[i_batch, i_region, :] = all_ind


        xyz_trans = xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
        # Recenter each ball on its query point.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
        if self.normalize_xyz:
            if self.radius == 0:
                raise ValueError("Radius cannot be zero when normalizing coordinates.")
            grouped_xyz /= self.radius

        if features is not None:
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat(
                    [grouped_xyz, grouped_features], dim=1
                )  # (B, C + 3, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert (
                self.use_xyz
            ), "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        # Assemble the requested outputs in a fixed order:
        # new_features [, grouped_xyz] [, unique_cnt].
        ret = [new_features]
        if self.ret_grouped_xyz:
            ret.append(grouped_xyz)
        if self.ret_unique_cnt:
            ret.append(unique_cnt)
        if len(ret) == 1:
            return ret[0]
        else:
            return tuple(ret)


class GroupAll(nn.Module):
    r"""
    Groups all features into a single group.

    Parameters
    ---------
    use_xyz : bool
        Concatenate the xyz coordinates to the grouped features
    ret_grouped_xyz : bool
        Additionally return the grouped xyz coordinates
    """

    def __init__(self, use_xyz=True, ret_grouped_xyz=False):
        # type: (GroupAll, bool) -> None
        super(GroupAll, self).__init__()
        self.use_xyz = use_xyz
        # BUG FIX: this flag was accepted but never stored, so forward()
        # raised AttributeError when reading self.ret_grouped_xyz.
        self.ret_grouped_xyz = ret_grouped_xyz

    def forward(self, xyz, new_xyz, features=None):
        # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            Ignored
        features : torch.Tensor
            Descriptors of the features (B, C, N)

        Returns
        -------
        new_features : torch.Tensor
            (B, C + 3, 1, N) tensor
        """

        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is not None:
            grouped_features = features.unsqueeze(2)  # (B, C, 1, N)
            if self.use_xyz:
                new_features = torch.cat(
                    [grouped_xyz, grouped_features], dim=1
                )  # (B, 3 + C, 1, N)
            else:
                new_features = grouped_features
        else:
            new_features = grouped_xyz

        if self.ret_grouped_xyz:
            return new_features, grouped_xyz
        else:
            return new_features

# def ext_cylinder_query(new_xyz, xyz, rot, radius, hmin, hmax, nsample):
#     """
#     在给定点云中查询每个点在指定旋转圆柱邻域内的点。

#     参数:
#     new_xyz (torch.Tensor): 形状为 (B, M, 3) 的查询点坐标张量
#     xyz (torch.Tensor): 形状为 (B, N, 3) 的输入点云坐标张量
#     rot (torch.Tensor): 形状为 (B, M, 9) 的旋转矩阵张量
#     radius (float): 圆柱底面半径
#     hmin (float): 圆柱的最小高度
#     hmax (float): 圆柱的最大高度
#     nsample (int): 每个查询点最多采样的点数

#     返回:
#     torch.Tensor: 形状为 (B, M, nsample) 的索引张量，存储每个查询点邻域内点的索引
#     """
#     assert new_xyz.is_contiguous() and xyz.is_contiguous() and rot.is_contiguous(), "Input tensors must be contiguous"
#     assert new_xyz.dtype == torch.float32 and xyz.dtype == torch.float32 and rot.dtype == torch.float32, "Input tensors must be float32"

#     B, M, _ = new_xyz.size()
#     N = xyz.size(1)

#     # 初始化索引张量
#     idx = torch.full((B, M, nsample), -1, dtype=torch.long, device=new_xyz.device)

#     # 扩展维度以进行广播计算
#     new_xyz_expanded = new_xyz.unsqueeze(2).expand(-1, -1, N, -1)  # (B, M, N, 3)
#     xyz_expanded = xyz.unsqueeze(1).expand(-1, M, -1, -1)  # (B, M, N, 3)
#     rot_expanded = rot.unsqueeze(2).expand(-1, -1, N, -1)  # (B, M, N, 9)

#     # 计算相对坐标
#     relative_xyz = xyz_expanded - new_xyz_expanded  # (B, M, N, 3)

#     # 提取旋转矩阵元素
#     r0 = rot_expanded[..., 0].unsqueeze(-1)
#     r1 = rot_expanded[..., 1].unsqueeze(-1)
#     r2 = rot_expanded[..., 2].unsqueeze(-1)
#     r3 = rot_expanded[..., 3].unsqueeze(-1)
#     r4 = rot_expanded[..., 4].unsqueeze(-1)
#     r5 = rot_expanded[..., 5].unsqueeze(-1)
#     r6 = rot_expanded[..., 6].unsqueeze(-1)
#     r7 = rot_expanded[..., 7].unsqueeze(-1)
#     r8 = rot_expanded[..., 8].unsqueeze(-1)

#     # 旋转相对坐标
#     x_rot = r0 * relative_xyz[..., 0:1] + r3 * relative_xyz[..., 1:2] + r6 * relative_xyz[..., 2:3]
#     y_rot = r1 * relative_xyz[..., 0:1] + r4 * relative_xyz[..., 1:2] + r7 * relative_xyz[..., 2:3]
#     z_rot = r2 * relative_xyz[..., 0:1] + r5 * relative_xyz[..., 1:2] + r8 * relative_xyz[..., 2:3]

#     # 计算旋转后的距离平方
#     d2 = y_rot * y_rot + z_rot * z_rot
#     radius2 = radius * radius

#     # 检查点是否在圆柱内
#     in_cylinder = (d2 < radius2) & (x_rot > hmin) & (x_rot < hmax)
#     in_cylinder = in_cylinder.squeeze(-1)  # 确保维度一致

#     # 将无效距离设为一个很大的值，避免被选中
#     dist2 = torch.where(in_cylinder, d2.squeeze(-1), torch.full_like(d2.squeeze(-1), 1e10))

#     # 获取每个查询点的最近 nsample 个点的距离和索引
#     dist2_topk, idx = torch.topk(dist2, nsample, dim=-1, largest=False)

#     # 处理没有足够有效点的情况
#     idx[dist2_topk >= 1e10] = -1

#     return idx
# @torch.jit.script
# def ext_cylinder_query(radius: float, hmin: float, hmax: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, rot: torch.Tensor) -> torch.Tensor:

# # def ext_cylinder_query(new_xyz: torch.Tensor, xyz: torch.Tensor, rot: torch.Tensor, radius: float, hmin: float, hmax: float, nsample: int) -> torch.Tensor:
#     """
#     在给定点云中查询每个点在指定旋转圆柱邻域内的点。

#     参数:
#     new_xyz (torch.Tensor): 形状为 (B, M, 3) 的查询点坐标张量
#     xyz (torch.Tensor): 形状为 (B, N, 3) 的输入点云坐标张量
#     rot (torch.Tensor): 形状为 (B, M, 9) 的旋转矩阵张量
#     radius (float): 圆柱底面半径
#     hmin (float): 圆柱的最小高度
#     hmax (float): 圆柱的最大高度
#     nsample (int): 每个查询点最多采样的点数

#     返回:
#     torch.Tensor: 形状为 (B, M, nsample) 的索引张量，存储每个查询点邻域内点的索引
#     """
#     assert new_xyz.is_contiguous() and xyz.is_contiguous() and rot.is_contiguous(), "Input tensors must be contiguous"
#     assert new_xyz.dtype == torch.float32 and xyz.dtype == torch.float32 and rot.dtype == torch.float32, "Input tensors must be float32"

#     B, M, _ = new_xyz.size()
#     N = xyz.size(1)
#     idx = torch.full((B, M, nsample), -1, dtype=torch.long, device=new_xyz.device)
#     radius_squared = radius * radius

#     for b in range(B):
#         for m in range(M):
#             # 获取当前查询点和旋转矩阵
#             query_point = new_xyz[b, m]
#             rotation_matrix = rot[b, m].view(3, 3)

#             # 计算相对坐标
#             relative_points = xyz[b] - query_point

#             # 应用旋转
#             rotated_points = torch.matmul(relative_points, rotation_matrix.T)

#             # 检查点是否在圆柱内
#             d_squared = rotated_points[:, 1] ** 2 + rotated_points[:, 2] ** 2
#             x = rotated_points[:, 0]
#             in_cylinder = (d_squared < radius_squared) & (x > hmin) & (x < hmax)

#             # 获取有效点的索引
#             valid_indices = torch.nonzero(in_cylinder).squeeze(-1)
#             num_valid = valid_indices.numel()

#             if num_valid > 0:
#                 if num_valid < nsample:
#                     # 若有效点不足 nsample 个，重复填充第一个有效点
#                     padding = valid_indices[0].repeat(nsample - num_valid)
#                     selected_indices = torch.cat([valid_indices, padding])
#                 else:
#                     # 随机采样 nsample 个点
#                     # perm = torch.randperm(num_valid, device=valid_indices.device)
#                     random_values = torch.rand(num_valid, device=valid_indices.device)
#                     permuted_indices = torch.argsort(random_values)
#                     selected_indices = valid_indices[permuted_indices[:nsample]]
#                 idx[b, m] = selected_indices

#     return idx


# @torch.jit.script
def ext_cylinder_query(radius: float, hmin: float, hmax: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, rot: torch.Tensor) -> torch.Tensor:
    """
    Fully vectorized cylinder neighborhood query (no explicit Python loops).

    Each query center in ``new_xyz`` defines an oriented cylinder: input
    points are translated relative to the center, rotated into the center's
    local frame via ``rot``, and kept when their squared radial distance
    (y^2 + z^2) is below ``radius**2`` and their local x-coordinate lies in
    the open interval (hmin, hmax).

    Parameters
    ----------
    radius : float
        Radius of the cylinders.
    hmin, hmax : float
        Endpoints of the cylinder height along the rotated x-axis.
    nsample : int
        Maximum number of point indices returned per cylinder.
    xyz : torch.Tensor
        (B, N, 3) float32 contiguous coordinates of the input points.
    new_xyz : torch.Tensor
        (B, M, 3) float32 contiguous cylinder centers (query points).
    rot : torch.Tensor
        (B, M, 9) float32 contiguous flattened 3x3 rotation matrices;
        applied here as ``relative @ R^T``.

    Returns
    -------
    torch.Tensor
        (B, M, nsample) long tensor of indices into ``xyz``. When a cylinder
        holds fewer than ``nsample`` points, the row is padded with the first
        valid index; when it holds none, the whole row is -1. Selection among
        valid points is random (driven by ``torch.rand``), mirroring the
        commented-out loop implementation above.
    """
    assert new_xyz.is_contiguous() and xyz.is_contiguous() and rot.is_contiguous(), "输入张量必须连续存储"
    assert new_xyz.dtype == torch.float32 and xyz.dtype == torch.float32 and rot.dtype == torch.float32, "输入张量必须为 float32 类型"

    B, M, _ = new_xyz.size()  # B: batch size, M: number of query points
    N = xyz.size(1)           # N: total number of input points
    radius_squared = radius * radius

    # -------------------------- relative coordinates (vectorized) --------------------------
    # Broadcast new_xyz (B, M, 1, 3) against xyz (B, 1, N, 3).
    new_xyz_expanded = new_xyz.unsqueeze(2)  # (B, M, 1, 3)
    xyz_expanded = xyz.unsqueeze(1)          # (B, 1, N, 3)
    relative_points = xyz_expanded - new_xyz_expanded  # (B, M, N, 3): offsets from every query point

    # -------------------------- batched rotation --------------------------
    rot_reshaped = rot.view(B, M, 3, 3)  # (B, M, 3, 3): restore flattened rotation matrices
    rotated_points = torch.matmul(relative_points, rot_reshaped.transpose(2, 3))  # (B, M, N, 3): rotate into each query frame

    # -------------------------- vectorized cylinder membership test --------------------------
    d_squared = rotated_points[..., 1] **2 + rotated_points[..., 2]** 2  # (B, M, N): squared radial distance
    x_rot = rotated_points[..., 0]  # (B, M, N): rotated x-coordinate (cylinder height axis)
    in_cylinder = (d_squared < radius_squared) & (x_rot > hmin) & (x_rot < hmax)  # (B, M, N): in-cylinder mask

    # -------------------------- vectorized random sampling and padding --------------------------
    # 1. Random sampling weights; invalid points get a huge negative weight so they sort last.
    random_weights = torch.rand(B, M, N, device=xyz.device)  # (B, M, N)
    random_weights = torch.where(in_cylinder, random_weights, torch.tensor(-1e10, device=xyz.device, dtype=random_weights.dtype))

    # 2. Sort by weight (descending) and keep the first nsample indices — emulates the
    #    per-query random permutation of the original loop implementation.
    sorted_indices = torch.argsort(random_weights, dim=2, descending=True)  # (B, M, N)
    idx = sorted_indices[..., :nsample]  # (B, M, nsample)

    # 3. Count valid points and locate the first valid index (used for padding).
    valid_count = torch.sum(in_cylinder.long(), dim=2)  # (B, M): valid points per query
    # cumsum == 1 is True from the first valid point up to (not including) the second;
    # argmax then returns the position of the first True, i.e. the first valid index.
    first_valid_mask = (torch.cumsum(in_cylinder.long(), dim=2) == 1)  # (B, M, N)
    first_valid_idx = torch.argmax(first_valid_mask.float(), dim=2)  # (B, M): 0 when no valid point exists

    # 4. Slots at positions >= valid_count hold invalid points; overwrite them with the first valid index.
    pad_mask = (torch.arange(nsample, device=xyz.device)[None, None, :] >= valid_count[:, :, None])  # (B, M, nsample)
    idx = torch.where(pad_mask, first_valid_idx[:, :, None].expand(-1, -1, nsample), idx)

    # 5. Queries with no valid point at all get -1 everywhere.
    idx = torch.where(valid_count[:, :, None] > 0, idx, torch.tensor(-1, device=xyz.device))

    return idx

class CylinderQuery(Function):
    @staticmethod
    def forward(ctx, radius, hmin, hmax, nsample, xyz, new_xyz, rot):
        # type: (Any, float, float, float, int, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
        r"""
        Find, for every query center, the indices of up to ``nsample`` points
        lying inside an oriented cylinder.

        Parameters
        ----------
        radius : float
            radius of the cylinders
        hmin, hmax : float
            endpoints of cylinder height in x-rotation axis
        nsample : int
            maximum number of features in the cylinders
        xyz : torch.Tensor
            (B, N, 3) xyz coordinates of the features
        new_xyz : torch.Tensor
            (B, npoint, 3) centers of the cylinder query
        rot: torch.Tensor
            (B, npoint, 9) flatten rotation matrices from
                           cylinder frame to world frame

        Returns
        -------
        torch.Tensor
            (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        # return _ext.cylinder_query(new_xyz, xyz, rot, radius, hmin, hmax, nsample)
        # FIX: ext_cylinder_query is declared as
        # (radius, hmin, hmax, nsample, xyz, new_xyz, rot); the previous call
        # reused the old C-extension argument order
        # (new_xyz, xyz, rot, radius, hmin, hmax, nsample), which bound tensors
        # to the scalar parameters and crashed on the first assert.
        return ext_cylinder_query(radius, hmin, hmax, nsample, xyz, new_xyz, rot)

    @staticmethod
    def backward(ctx, a=None):
        # Neighborhood indices are not differentiable: no gradient flows back
        # to any of the seven forward inputs.
        return None, None, None, None, None, None, None


# cylinder_query = CylinderQuery.apply
# Bind the module-level API directly to the pure-PyTorch implementation,
# bypassing the autograd Function wrapper (the returned indices carry no
# gradient, so no custom backward is needed).
cylinder_query = ext_cylinder_query


class CylinderQueryAndGroup(nn.Module):
    r"""
    Groups features with a cylinder query of the given radius and height.

    Runs ``cylinder_query`` to find, for each query center, up to ``nsample``
    neighbor indices inside an oriented cylinder, then gathers the neighbors'
    coordinates (and optionally their feature descriptors), centers them on
    the query point, and optionally normalizes and/or rotates them into the
    cylinder's local frame.

    Parameters
    ---------
    radius : float32
        Radius of cylinder
    hmin, hmax: float32
        endpoints of cylinder height in x-rotation axis
    nsample : int32
        Maximum number of features to gather in the ball
    use_xyz : bool
        If True, prepend the grouped xyz offsets to the grouped features.
    ret_grouped_xyz : bool
        If True, additionally return the grouped xyz offsets.
    normalize_xyz : bool
        If True, divide the grouped xyz offsets by ``radius``.
    rotate_xyz : bool
        If True, rotate the grouped xyz offsets by ``rot``.
    sample_uniformly : bool
        If True, resample each group so unique neighbors are drawn uniformly.
    ret_unique_cnt : bool
        If True, additionally return per-region unique-neighbor counts
        (requires ``sample_uniformly``).
    """

    def __init__(self, radius, hmin, hmax, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, rotate_xyz=True, sample_uniformly=False, ret_unique_cnt=False):
        # type: (CylinderQueryAndGroup, float, float, float, int, bool, bool, bool, bool, bool, bool) -> None
        super(CylinderQueryAndGroup, self).__init__()
        self.radius, self.nsample, self.hmin, self.hmax, = radius, nsample, hmin, hmax
        self.use_xyz = use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
        self.normalize_xyz = normalize_xyz
        self.rotate_xyz = rotate_xyz
        self.sample_uniformly = sample_uniformly
        self.ret_unique_cnt = ret_unique_cnt
        if self.ret_unique_cnt:
            # unique counts are only computed inside the uniform-sampling branch
            assert(self.sample_uniformly)

    def forward(self, xyz, new_xyz, rot, features=None):
        # type: (CylinderQueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
        r"""
        Parameters
        ----------
        xyz : torch.Tensor
            xyz coordinates of the features (B, N, 3)
        new_xyz : torch.Tensor
            centroids (B, npoint, 3)
        rot : torch.Tensor
            rotation matrices (B, npoint, 3, 3)
        features : torch.Tensor
            Descriptors of the features (B, C, N); if None, ``use_xyz``
            must be True and only the grouped xyz offsets are returned.

        Returns
        -------
        new_features : torch.Tensor
            (B, 3 + C, npoint, nsample) tensor (or (B, C, npoint, nsample)
            when ``use_xyz`` is False); followed by grouped_xyz and/or
            unique_cnt when the corresponding ``ret_*`` flags are set.
        """
        B, npoint, _ = new_xyz.size()
        # cylinder_query expects the rotation matrices flattened to (B, npoint, 9)
        idx = cylinder_query(self.radius, self.hmin, self.hmax, self.nsample, xyz, new_xyz, rot.view(B, npoint, 9))

        if self.sample_uniformly:
            # unique_cnt lives on CPU regardless of idx's device — presumably
            # only used for bookkeeping/statistics downstream; verify callers.
            unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
            for i_batch in range(idx.shape[0]):
                for i_region in range(idx.shape[1]):
                    unique_ind = torch.unique(idx[i_batch, i_region, :])
                    num_unique = unique_ind.shape[0]
                    unique_cnt[i_batch, i_region] = num_unique
                    # Keep each unique neighbor once, then pad the region back to
                    # nsample with uniform random draws from the unique set.
                    # NOTE(review): ext_cylinder_query fills empty regions with -1;
                    # such a region yields unique_ind == [-1] here and the sentinel
                    # is resampled — confirm grouping_operation tolerates -1 indices.
                    sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
                    all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
                    idx[i_batch, i_region, :] = all_ind


        xyz_trans = xyz.transpose(1, 2).contiguous()
        # grouping_operation is defined elsewhere in this file; gathers the
        # indexed columns of a (B, C, N) tensor into (B, C, npoint, nsample).
        grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
        # center every group on its query point
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
        if self.normalize_xyz:
            grouped_xyz /= self.radius
        if self.rotate_xyz:
            # rotate offsets into each cylinder's local frame: move the xyz axis
            # last, batch-multiply by rot, then restore the channel-first layout
            grouped_xyz_ = grouped_xyz.permute(0, 2, 3, 1).contiguous() # (B, npoint, nsample, 3)
            grouped_xyz_ = torch.matmul(grouped_xyz_, rot)
            grouped_xyz = grouped_xyz_.permute(0, 3, 1, 2).contiguous()


        if features is not None:
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat(
                    [grouped_xyz, grouped_features], dim=1
                )  # (B, C + 3, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert (
                self.use_xyz
            ), "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        # Assemble the return value(s) in a fixed order:
        # new_features [, grouped_xyz] [, unique_cnt]
        ret = [new_features]
        if self.ret_grouped_xyz:
            ret.append(grouped_xyz)
        if self.ret_unique_cnt:
            ret.append(unique_cnt)
        if len(ret) == 1:
            return ret[0]
        else:
            return tuple(ret)