import torch
import numpy as np
from scipy.spatial import KDTree
import time
import logging
import faiss
from faiss import GpuIndexFlatL2, StandardGpuResources
from typing import List, Tuple, Dict
torch.manual_seed(0)

class BatchPointEncoderDecoder:
    """Encode point-cloud rows into compact 63-bit integer codes and back.

    Each code packs three fields into a signed int64:

        [ batch id | per-batch sampling rank | global row offset ]

    so a single int64 tensor round-trips batch membership, a greedy
    nearest-point visiting order, and the original row index.
    """

    def __init__(self, max_points_per_batch=204800, num_blocks=16):
        """
        Initialize the encoder/decoder (supports block-wise processing).

        Args:
            max_points_per_batch: maximum number of points allowed per batch.
            num_blocks: number of blocks each batch is split into.
        """
        self.max_points_per_batch = max_points_per_batch
        self.num_blocks = num_blocks  # blocks per batch
        self.batch_bits = 16       # supports up to 2^16 = 65536 batches
        self.index_bits = 21       # 2^21 = 2097152 > 204800
        self.offset_bits = 26      # 2^26 = 67108864

        # Keep the packed value inside a signed 64-bit integer.
        assert self.batch_bits + self.index_bits + self.offset_bits <= 63, "总位数超过63位"

        # Field masks and shift amounts for packing/unpacking.
        self.batch_mask = (1 << self.batch_bits) - 1
        self.index_mask = (1 << self.index_bits) - 1
        self.offset_mask = (1 << self.offset_bits) - 1
        self.batch_shift = self.index_bits + self.offset_bits
        self.index_shift = self.offset_bits

        # Pick a suitable chunk size for chunked distance computations.
        self.chunk_size = self._calculate_chunk_size()

    def _calculate_chunk_size(self) -> int:
        """Pick a cdist row-chunk size appropriate for max_points_per_batch."""
        if self.max_points_per_batch < 10000:
            return self.max_points_per_batch
        elif self.max_points_per_batch < 100000:
            return 15000
        else:
            return 20000  # larger chunks for large point clouds

    def _split_into_blocks(self, points: torch.Tensor) -> List[torch.Tensor]:
        """Split one batch's points into num_blocks contiguous slices.

        Uses ceil(N / num_blocks) points per block, so trailing blocks may
        be shorter — or empty when N < num_blocks.
        """
        N = len(points)
        block_size = (N + self.num_blocks - 1) // self.num_blocks  # ceil division
        blocks = []
        for i in range(self.num_blocks):
            start = i * block_size
            end = min(start + block_size, N)
            blocks.append(points[start:end])
        return blocks

    def _process_blocks_parallel(self, blocks: List[torch.Tensor]) -> List[torch.Tensor]:
        """Compute a sampling order for each block.

        NOTE(review): despite the name, blocks are processed sequentially;
        the name is kept for interface compatibility.
        """
        if not blocks:
            return []

        block_orders = []
        for block in blocks:
            if len(block) == 0:
                block_orders.append(torch.empty(0, dtype=torch.long, device=block.device))
                continue

            # Use half the block size (min 1000) as the distance-chunk size
            # to bound peak memory of the pairwise-distance computation.
            chunk_size = max(1000, len(block) // 2)
            block_orders.append(self._nearest_point_iterative(block, chunk_size=chunk_size))

        return block_orders

    def _merge_block_orders(self, blocks: List[torch.Tensor], block_orders: List[torch.Tensor]) -> torch.Tensor:
        """Merge per-block sampling orders into one per-batch rank tensor.

        Blocks are visited by increasing L1 norm of their centroid; each
        point's per-block rank is offset by the number of points merged
        before its block, giving a batch-wide rank.  The result is indexed
        by the point's ORIGINAL position within the batch, so it can be
        scattered straight back by `_get_batch_sampling_order`.
        """
        if not blocks:
            # Fixed: the original indexed blocks[0].device on an empty list.
            return torch.empty(0, dtype=torch.long)

        device = blocks[0].device
        total = sum(len(b) for b in blocks)
        if total == 0:
            return torch.empty(0, dtype=torch.long, device=device)

        # L1 norm of each block centroid; empty blocks sort last.
        # Fixed: the original skipped empty blocks here but still sorted over
        # range(len(blocks)), which raised IndexError for small batches.
        centroid_keys = [
            torch.sum(torch.abs(torch.mean(b, dim=0))).item() if len(b) > 0 else float('inf')
            for b in blocks
        ]
        visit_order = sorted(range(len(blocks)), key=lambda i: centroid_keys[i])

        # Original start offset of each block within the batch.
        starts = [0]
        for b in blocks[:-1]:
            starts.append(starts[-1] + len(b))

        merged = torch.zeros(total, dtype=torch.long, device=device)
        rank_offset = 0
        for idx in visit_order:
            order = block_orders[idx].to(device)
            valid = order != -1  # -1 marks points the sampler never reached
            if valid.any():
                positions = torch.nonzero(valid, as_tuple=True)[0] + starts[idx]
                merged[positions] = order[valid] + rank_offset
            rank_offset += len(blocks[idx])

        return merged

    def _get_batch_sampling_order(self, points, batch_indices):
        """Compute each point's within-batch sampling rank (block-wise)."""
        device = points.device
        N = len(batch_indices)
        sampling_order = torch.zeros(N, dtype=torch.long, device=device)

        unique_batches = torch.unique(batch_indices)
        for batch_id in unique_batches:
            batch_mask = (batch_indices == batch_id).nonzero(as_tuple=True)[0]
            if len(batch_mask) == 0:
                continue
            batch_points = points[batch_mask]

            # Split the batch into blocks, order each block, then merge.
            blocks = self._split_into_blocks(batch_points)
            block_orders = self._process_blocks_parallel(blocks)
            batch_order = self._merge_block_orders(blocks, block_orders)

            # batch_order[k] is the rank of the k-th point of this batch in
            # its original order; scatter back to the global positions.
            n = min(len(batch_order), len(batch_mask))
            sampling_order[batch_mask[:n]] = batch_order[:n].to(device)

        return sampling_order

    def _nearest_point_iterative(self, points, num_samples=None, chunk_size=None, device='cuda'):
        """Greedy nearest-point traversal; returns per-point visit ranks.

        Returns seq_ids where seq_ids[p] is the step at which point p was
        visited (-1 if never visited).  Distances are L1 and computed in
        row chunks of `chunk_size` to bound peak memory.
        """
        N = len(points)
        if N == 0:
            return torch.empty(0, dtype=torch.long, device=points.device)
        if num_samples is None:
            num_samples = N
        if chunk_size is None:
            chunk_size = self.chunk_size
        # Fall back to the input's device when CUDA is requested but absent,
        # so the sampler also runs on CPU-only machines.
        if device == 'cuda' and not torch.cuda.is_available():
            device = points.device

        points = points.to(device)
        seq_ids = torch.full((N,), -1, dtype=torch.long, device=device)
        first_idx = torch.randint(0, N, (1,), device=device).item()
        seq_ids[first_idx] = 0
        last_idx = first_idx

        # Precompute each point's nearest neighbour (excluding itself).
        nearest_neighbors = torch.full((N,), -1, dtype=torch.long, device=device)
        min_distances = torch.full((N,), float('inf'), dtype=torch.float, device=device)

        for i in range(0, N, chunk_size):
            end_i = min(i + chunk_size, N)
            dist_chunk = torch.cdist(points[i:end_i], points, p=1)  # L1 distance
            # Fixed: mask self-distances (always 0) so a point is never
            # reported as its own nearest neighbour.
            rows = torch.arange(end_i - i, device=device)
            dist_chunk[rows, rows + i] = float('inf')

            chunk_min_dist, chunk_nn = torch.min(dist_chunk, dim=1)
            update_mask = chunk_min_dist < min_distances[i:end_i]
            update_indices = torch.arange(i, end_i, device=device)[update_mask]
            nearest_neighbors[update_indices] = chunk_nn[update_mask]
            min_distances[update_indices] = chunk_min_dist[update_mask]

            del dist_chunk, chunk_min_dist, chunk_nn, update_mask, update_indices
            if points.is_cuda:
                torch.cuda.empty_cache()

        # Mutual (reciprocal) nearest neighbours, vectorized instead of the
        # original O(N) Python loop of .item() calls.
        reciprocal_nn = torch.zeros(N, dtype=torch.bool, device=device)
        valid = nearest_neighbors != -1
        if valid.any():
            idx = torch.arange(N, device=device)
            reciprocal_nn[valid] = nearest_neighbors[nearest_neighbors[valid]] == idx[valid]

        # Walk the cloud: prefer a mutual-NN hop to an unvisited point,
        # otherwise jump to the closest point not yet visited.
        for rank in range(1, num_samples):
            current_idx = nearest_neighbors[last_idx].item()
            if not (reciprocal_nn[last_idx] and current_idx != -1 and seq_ids[current_idx] == -1):
                current_idx = self._find_closest_unsampled_point(points, last_idx, seq_ids, chunk_size)
                if current_idx == -1:
                    break  # every point has been visited

            seq_ids[current_idx] = rank
            last_idx = current_idx

        return seq_ids

    def _find_closest_unsampled_point(self, points: torch.Tensor, last_idx: int,
                                     seq_ids: torch.Tensor, chunk_size: int) -> int:
        """Return the index of the closest point with seq_ids == -1, or -1."""
        N = len(points)
        best_dist = float('inf')
        best_idx = -1
        for j in range(0, N, chunk_size):
            end_j = min(j + chunk_size, N)
            dist_chunk = torch.cdist(points[last_idx:last_idx + 1], points[j:end_j], p=1).squeeze(0)
            mask = (seq_ids[j:end_j] == -1)
            valid_dists = dist_chunk[mask]
            if len(valid_dists) > 0:
                local_min, local_pos = torch.min(valid_dists, dim=0)
                local_min = local_min.item()
                if local_min < best_dist:
                    best_dist = local_min
                    best_idx = torch.nonzero(mask, as_tuple=True)[0][local_pos].item() + j
            del dist_chunk, mask, valid_dists
            # Fixed: original compared a torch.device to the string 'cuda'.
            if points.is_cuda:
                torch.cuda.empty_cache()
        return best_idx

    def encode(self, points, batch_indices):
        """Encode points + batch ids into one int64 code per point.

        Args:
            points: (N, D) tensor of coordinates.
            batch_indices: (N,) integer tensor of batch membership.

        Returns:
            (N,) int64 tensor packing (batch id, sampling rank, row offset).
        """
        N = len(points)
        assert len(batch_indices) == N, "输入点云与batch_indices长度必须一致"

        # Per-batch greedy nearest-point visiting ranks.
        sampling_order = self._get_batch_sampling_order(points, batch_indices)

        # Pack the three fields; masks truncate out-of-range values.
        global_offsets = torch.arange(N, dtype=torch.long, device=points.device)
        codes = (
            (batch_indices.to(torch.int64) & self.batch_mask) << self.batch_shift |
            (sampling_order.to(torch.int64) & self.index_mask) << self.index_shift |
            (global_offsets & self.offset_mask)
        )
        return codes

    def decode(self, codes, grid_coord=None, if_test=False):
        """Unpack codes into batch id, sampling rank and original row index.

        Args:
            codes: (N,) int64 tensor produced by `encode`.
            grid_coord: optional (M, 3) tensor to gather coordinates from.
            if_test: when True, return all unpacked fields.

        Returns:
            if_test=False: (x, y, z, batch) columns; requires grid_coord.
            if_test=True:  (xyz or None, batch, sampling rank, offsets).

        Raises:
            ValueError: if grid_coord is None while if_test is False.
        """
        batch_values = (codes >> self.batch_shift) & self.batch_mask
        sampling_order = (codes >> self.index_shift) & self.index_mask
        global_offsets = codes & self.offset_mask

        xyz = None
        if grid_coord is not None:
            xyz = grid_coord[global_offsets]

        if not if_test:
            # Fixed: the original dereferenced xyz[:, 0] here and crashed
            # with an opaque TypeError when grid_coord was omitted.
            if xyz is None:
                raise ValueError("grid_coord is required when if_test is False")
            return xyz[:, 0], xyz[:, 1], xyz[:, 2], batch_values

        return xyz, batch_values, sampling_order, global_offsets

# Module-level singleton and thin wrappers that preserve the original
# function-style interface.
nps = BatchPointEncoderDecoder(max_points_per_batch=204800)


def encode(points, batch_indices):
    """Encode via the shared BatchPointEncoderDecoder instance."""
    return nps.encode(points, batch_indices)


def decode(codes, grid_coord=None):
    """Decode via the shared BatchPointEncoderDecoder instance."""
    return nps.decode(codes, grid_coord)

if __name__ == "__main__":
    # Demo data: 2 batches of 3 points each, processed as 2 blocks per batch.
    demo_points = torch.tensor(
        [
            [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],  # batch=0
            [10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0],  # batch=1
        ],
        device='cuda',
    )
    demo_batches = torch.tensor([0, 0, 0, 1, 1, 1], device='cuda')
    grid_coord = demo_points.clone()

    # Build a codec with an explicit block count.
    codec = BatchPointEncoderDecoder(max_points_per_batch=204800, num_blocks=2)

    # Encode, then decode in test mode to recover every field.
    codes = codec.encode(demo_points, demo_batches)
    print("编码结果:")
    print(codes)

    xyz, b, order, offsets = codec.decode(codes, grid_coord, if_test=True)
    for label, value in (
        ("解码后的坐标:", xyz),
        ("解码后的batch编号:", b),
        ("采样顺序:", order),
        ("原始索引:", offsets),
    ):
        print(label)
        print(value)