import torch
import numpy as np
from scipy.spatial import KDTree
import time
import logging
import faiss
from faiss import GpuIndexFlatL2, StandardGpuResources
from typing import List, Tuple, Dict
import torch.cuda as cuda

# Fix the global RNG seed so the random start point chosen for each
# block's serialization (torch.randint in the processors below) is reproducible.
torch.manual_seed(0)

class SingleGPUBlockProcessor:
    """Block-parallel point-serialization processor for a single GPU.

    Each block of points is serialized into a visiting order (seq_ids) by
    starting from a random point and repeatedly stepping to the nearest
    unvisited neighbour.  Blocks are distributed round-robin over four CUDA
    streams; blocks larger than 1000 points may use a FAISS GPU index for
    the nearest-neighbour precomputation.
    """

    def __init__(self, chunk_size=20000, use_faiss=True):
        """
        Args:
            chunk_size: number of query rows per pairwise-distance chunk;
                bounds peak memory of torch.cdist / FAISS searches.
            use_faiss: enable the FAISS path for blocks with > 1000 points.
        """
        self.chunk_size = chunk_size
        self.use_faiss = use_faiss
        # Four streams let independent blocks overlap on one device.
        self.streams = [cuda.Stream() for _ in range(4)]

        # Set up FAISS GPU resources once and reuse them for every index.
        if self.use_faiss:
            self.faiss_resources = StandardGpuResources()
            self.faiss_resources.setTempMemory(1024 * 1024 * 1024)  # 1 GB temp memory

    def process_blocks(self, blocks):
        """Serialize all blocks, assigning them round-robin to the CUDA streams.

        Returns:
            One seq_ids tensor per input block, in the input order.
        """
        results = []
        for i, block in enumerate(blocks):
            stream = self.streams[i % len(self.streams)]
            with cuda.stream(stream):
                results.append(self._process_block(block))

        # Wait for all queued GPU work before handing results back.
        for stream in self.streams:
            stream.synchronize()

        return results

    def _process_block(self, block):
        """Serialize one block, dispatching to the FAISS or torch backend."""
        if len(block) == 0:
            return torch.tensor([], dtype=torch.long, device=block.device)

        # The FAISS index build only pays off for larger blocks.
        if self.use_faiss and len(block) > 1000:
            return self._process_block_faiss(block)
        return self._process_block_torch(block)

    def _process_block_faiss(self, block):
        """Compute the sampling order of one block via a FAISS GPU index.

        NOTE(review): FAISS GpuIndexFlatL2 returns (squared) L2 distances
        while the torch fallback uses L1 (p=1); the two backends can
        therefore produce different orders — confirm this is intended.
        """
        device = block.device
        n, d = block.shape

        # Build a flat L2 index over the whole block.
        index = GpuIndexFlatL2(self.faiss_resources, d)
        index.add(block.cpu().numpy())

        # seq_ids[p] = rank of point p in the visiting order (-1 = unvisited).
        seq_ids = torch.full((n,), -1, dtype=torch.long, device=device)
        first_idx = torch.randint(0, n, (1,), device=device).item()
        seq_ids[first_idx] = 0
        last_idx = first_idx

        # Precompute each point's nearest neighbour, excluding itself.
        nearest_neighbors = torch.full((n,), -1, dtype=torch.long, device=device)
        min_distances = torch.full((n,), float('inf'), dtype=torch.float, device=device)

        k = min(1000, n)  # query at most 1000 neighbours per point
        for i in range(0, n, self.chunk_size):
            end_i = min(i + self.chunk_size, n)
            query = block[i:end_i].cpu().numpy()

            distances, indices = index.search(query, k)

            dist_tensor = torch.tensor(distances, dtype=torch.float, device=device)
            idx_tensor = torch.tensor(indices, dtype=torch.long, device=device)

            for j in range(end_i - i):
                global_idx = i + j
                # FAISS results are sorted by distance, so the first hit
                # that is neither the query point itself nor a padding
                # slot (-1) is the true nearest neighbour.
                for pos in range(k):
                    neighbor_idx = idx_tensor[j, pos].item()
                    if neighbor_idx == global_idx or neighbor_idx == -1:
                        continue
                    # Bug fix: index the distance by result position `pos`;
                    # the original indexed by the neighbour's global id,
                    # which is wrong and can exceed the k result columns.
                    dist = dist_tensor[j, pos]
                    if dist < min_distances[global_idx]:
                        min_distances[global_idx] = dist
                        nearest_neighbors[global_idx] = neighbor_idx
                    break

            del dist_tensor, idx_tensor

        # Mark mutually-nearest pairs (these get a fast path below).
        reciprocal_nn = torch.zeros(n, dtype=torch.bool, device=device)
        for i in range(n):
            nn_i = nearest_neighbors[i].item()
            if nn_i != -1 and nearest_neighbors[nn_i].item() == i:
                reciprocal_nn[i] = True

        # Walk the block: prefer the precomputed mutual nearest neighbour,
        # otherwise scan for the closest still-unvisited point.
        for i in range(1, n):
            current_idx = nearest_neighbors[last_idx].item()

            if reciprocal_nn[last_idx] and seq_ids[current_idx] == -1:
                pass  # take the mutual nearest neighbour directly
            else:
                # Bug fix: the chunk_size argument was missing here, which
                # made this call raise TypeError at runtime.
                current_idx = self._find_closest_unsampled_point(
                    block, last_idx, seq_ids, self.chunk_size)
                if current_idx == -1:
                    break  # every point has been visited

            seq_ids[current_idx] = i
            last_idx = current_idx

        return seq_ids

    def _process_block_torch(self, block):
        """Serialize one block with pure PyTorch (no FAISS)."""
        return self._nearest_point_iterative(block, chunk_size=self.chunk_size)

    def _nearest_point_iterative(self, points, num_samples=None, chunk_size=None):
        """Memory-bounded nearest-point iterative sampling.

        Args:
            points: (N, d) tensor of point coordinates.
            num_samples: how many points to order (defaults to all N).
            chunk_size: rows per cdist chunk (defaults to self.chunk_size).

        Returns:
            (N,) long tensor: seq_ids[p] = visiting rank of point p,
            -1 for points never reached.
        """
        N = len(points)
        if num_samples is None:
            num_samples = N

        if chunk_size is None:
            chunk_size = self.chunk_size

        seq_ids = torch.full((N,), -1, dtype=torch.long, device=points.device)
        first_idx = torch.randint(0, N, (1,), device=points.device).item()
        seq_ids[first_idx] = 0
        last_idx = first_idx

        # Precompute every point's nearest neighbour, excluding itself.
        nearest_neighbors = torch.full((N,), -1, dtype=torch.long, device=points.device)
        min_distances = torch.full((N,), float('inf'), dtype=torch.float, device=points.device)

        for i in range(0, N, chunk_size):
            end_i = min(i + chunk_size, N)
            dist_chunk = torch.cdist(points[i:end_i], points, p=1)

            # Bug fix: mask out each point's zero distance to itself,
            # otherwise every point's "nearest neighbour" is itself and
            # the reciprocal-NN fast path below never fires usefully.
            rows = torch.arange(end_i - i, device=points.device)
            dist_chunk[rows, rows + i] = float('inf')

            chunk_min_dist, chunk_nn = torch.min(dist_chunk, dim=1)
            update_mask = chunk_min_dist < min_distances[i:end_i]
            update_indices = torch.arange(i, end_i, device=points.device)[update_mask]

            nearest_neighbors[update_indices] = chunk_nn[update_mask]
            min_distances[update_indices] = chunk_min_dist[update_mask]

            del dist_chunk, chunk_min_dist, chunk_nn, update_mask, update_indices
            cuda.empty_cache()

        # Mark mutually-nearest pairs.
        reciprocal_nn = torch.zeros(N, dtype=torch.bool, device=points.device)
        for i in range(N):
            nn_i = nearest_neighbors[i].item()
            if nn_i != -1 and nearest_neighbors[nn_i].item() == i:
                reciprocal_nn[i] = True

        # Iteratively pick the next sample point.
        for i in range(1, num_samples):
            current_idx = nearest_neighbors[last_idx].item()

            if reciprocal_nn[last_idx] and seq_ids[current_idx] == -1:
                pass  # mutual nearest neighbour is free to take
            else:
                current_idx = self._find_closest_unsampled_point(points, last_idx, seq_ids, chunk_size)
                if current_idx == -1:
                    break  # no unvisited point left

            seq_ids[current_idx] = i
            last_idx = current_idx

        return seq_ids

    def _find_closest_unsampled_point(self, points: torch.Tensor, last_idx: int,
                                      seq_ids: torch.Tensor, chunk_size: int = None) -> int:
        """Return the index of the unvisited point closest (L1) to last_idx.

        Args:
            chunk_size: rows per cdist chunk; defaults to self.chunk_size
                (new backward-compatible default).

        Returns:
            Index of the nearest point whose seq_ids entry is still -1,
            or -1 when every point has been visited.
        """
        if chunk_size is None:
            chunk_size = self.chunk_size

        N = len(points)
        min_dist = float('inf')
        current_idx = -1

        for j in range(0, N, chunk_size):
            end_j = min(j + chunk_size, N)
            dist_chunk = torch.cdist(points[last_idx:last_idx + 1], points[j:end_j], p=1).squeeze(0)

            mask = (seq_ids[j:end_j] == -1)  # unvisited points in this chunk
            valid_dists = dist_chunk[mask]

            if len(valid_dists) > 0:
                local_min, local_idx = torch.min(valid_dists, dim=0)
                if local_min < min_dist:
                    min_dist = local_min
                    # Map the masked position back to a global point index.
                    current_idx = torch.nonzero(mask, as_tuple=True)[0][local_idx].item() + j

            del dist_chunk, mask, valid_dists
            cuda.empty_cache()

        return current_idx

class BatchPointEncoderDecoder:
    def __init__(self, max_points_per_batch=204800, num_blocks=16, use_faiss=True):
        """Encoder/decoder packing (batch id, sampling order, offset) per point.

        Args:
            max_points_per_batch: maximum number of points allowed per batch.
            num_blocks: number of blocks each batch is split into.
            use_faiss: whether FAISS accelerates the nearest-neighbour search.
        """
        self.max_points_per_batch = max_points_per_batch
        self.num_blocks = num_blocks  # blocks per batch
        # Bit layout (MSB -> LSB): batch id | sampling index | global offset.
        self.batch_bits = 16       # supports up to 2^16 = 65536 batches
        self.index_bits = 21       # 2^21 = 2097152 > 204800
        self.offset_bits = 26      # 2^26 = 67108864

        assert self.batch_bits + self.index_bits + self.offset_bits <= 63, "总位数超过63位"

        # Masks and shift amounts derived from the layout above.
        self.batch_mask = (1 << self.batch_bits) - 1
        self.index_mask = (1 << self.index_bits) - 1
        self.offset_mask = (1 << self.offset_bits) - 1
        self.batch_shift = self.index_bits + self.offset_bits
        self.index_shift = self.offset_bits

        # Chunk size follows from the batch-size bound.
        self.chunk_size = self._calculate_chunk_size()

        # Single-GPU parallel block processor.
        self.block_processor = SingleGPUBlockProcessor(
            chunk_size=self.chunk_size,
            use_faiss=use_faiss
        )

    def _calculate_chunk_size(self) -> int:
        """Pick a distance-chunk size suited to max_points_per_batch."""
        if self.max_points_per_batch < 10000:
            return self.max_points_per_batch
        elif self.max_points_per_batch < 100000:
            return 15000
        else:
            return 20000  # large point clouds get the biggest chunk size

    def _split_into_blocks(self, points: torch.Tensor) -> List[torch.Tensor]:
        """Split one batch into self.num_blocks contiguous, near-equal slices.

        Always returns exactly num_blocks slices; trailing slices are
        empty when len(points) < num_blocks.
        """
        N = len(points)
        block_size = (N + self.num_blocks - 1) // self.num_blocks  # ceil division
        blocks = []
        for i in range(self.num_blocks):
            start = i * block_size
            end = min(start + block_size, N)
            blocks.append(points[start:end])
        return blocks

    def _process_blocks_parallel(self, blocks: List[torch.Tensor]) -> List[torch.Tensor]:
        """Serialize all blocks on CUDA streams via the block processor."""
        return self.block_processor.process_blocks(blocks)

    def _merge_block_orders(self, blocks: List[torch.Tensor], block_orders: List[torch.Tensor]) -> torch.Tensor:
        """Merge per-block orders, visiting blocks by centroid L1 norm.

        Bug fix: the original built the centroid list only from non-empty
        blocks but then indexed it with *block* indices, raising IndexError
        whenever a batch produced an empty block (e.g. N < num_blocks);
        it also dereferenced blocks[0] when the list itself was empty.
        Empty blocks are now excluded up front.
        """
        if not blocks:
            # No device to borrow from an empty list; return a CPU tensor.
            return torch.tensor([], dtype=torch.long)

        non_empty = [i for i, b in enumerate(blocks) if len(b) > 0]
        if not non_empty:
            return torch.tensor([], dtype=torch.long, device=blocks[0].device)

        # L1 norm of each non-empty block's centroid decides the visit order.
        centroid_l1 = {
            i: torch.sum(torch.abs(torch.mean(blocks[i], dim=0))).item()
            for i in non_empty
        }
        sorted_indices = sorted(non_empty, key=lambda i: centroid_l1[i])

        # Concatenate block orders, offsetting by the points consumed so far.
        merged_order = []
        current_offset = 0
        for idx in sorted_indices:
            for sample_idx in block_orders[idx]:
                if sample_idx != -1:  # skip unvisited points
                    merged_order.append(current_offset + sample_idx.item())
            current_offset += len(blocks[idx])

        return torch.tensor(merged_order, dtype=torch.long, device=blocks[0].device)

    def _get_batch_sampling_order(self, points, batch_indices):
        """Compute, per point, its sampling order within its batch.

        Each batch is split into blocks that are serialized in parallel,
        then the block orders are merged.
        """
        device = points.device
        N = len(batch_indices)
        sampling_order = torch.zeros(N, dtype=torch.long, device=device)

        unique_batches = torch.unique(batch_indices)
        for batch_id in unique_batches:
            batch_mask = (batch_indices == batch_id).nonzero(as_tuple=True)[0]
            batch_points = points[batch_mask]
            batch_size = len(batch_mask)

            if batch_size == 0:
                continue

            # Split, serialize in parallel, then merge the block orders.
            blocks = self._split_into_blocks(batch_points)
            block_orders = self._process_blocks_parallel(blocks)
            batch_order = self._merge_block_orders(blocks, block_orders)

            # Map the merged order back onto the global point indices.
            # The guard covers batch_order being shorter than the batch
            # (unvisited points are dropped by the merge).
            for global_idx, sample_idx in enumerate(batch_order):
                if global_idx < len(batch_mask):
                    sampling_order[batch_mask[global_idx]] = sample_idx

        return sampling_order

    def encode(self, points, batch_indices):
        """Encode each point into one int64 code.

        Layout: (batch_id << 47) | (sampling_order << 26) | global_offset.

        Args:
            points: (N, d) point coordinates.
            batch_indices: (N,) batch id per point.

        Returns:
            (N,) int64 tensor of codes.
        """
        N = len(points)
        assert len(batch_indices) == N, "输入点云与batch_indices长度必须一致"

        # Per-point sampling order inside its batch.
        sampling_order = self._get_batch_sampling_order(points, batch_indices)

        # Pack the three fields into a single 63-bit code.
        global_offsets = torch.arange(N, dtype=torch.long, device=points.device)
        codes = (
            (batch_indices.to(torch.int64) & self.batch_mask) << self.batch_shift |
            (sampling_order.to(torch.int64) & self.index_mask) << self.index_shift |
            (global_offsets & self.offset_mask)
        )
        return codes

    def decode(self, codes, grid_coord=None, if_test=False):
        """Recover batch id, sampling order and original offset from codes.

        Args:
            codes: int64 tensor produced by encode().
            grid_coord: optional coordinate table indexed by decoded offsets.
            if_test: when True, return (xyz, batch, order, offset);
                otherwise return (x, y, z, batch) columns.

        NOTE(review): the if_test=False path indexes xyz, so grid_coord
        must not be None in that mode — confirm all callers pass it.
        """
        batch_values = (codes >> self.batch_shift) & self.batch_mask
        sampling_order = (codes >> self.index_shift) & self.index_mask
        global_offsets = codes & self.offset_mask

        xyz = None
        if grid_coord is not None:
            xyz = grid_coord[global_offsets]

        if not if_test:
            return xyz[:, 0], xyz[:, 1], xyz[:, 2], batch_values

        return xyz, batch_values, sampling_order, global_offsets

# Keep the original module-level interface unchanged.
# NOTE(review): constructing BatchPointEncoderDecoder here creates CUDA
# streams at import time (inside SingleGPUBlockProcessor.__init__) —
# presumably this fails on CPU-only machines; consider lazy initialization.
nps = BatchPointEncoderDecoder(max_points_per_batch=204800)
def encode(points, batch_indices):
    # Thin wrapper delegating to the shared encoder/decoder instance.
    return nps.encode(points, batch_indices)
def decode(codes, grid_coord=None):
    # Non-test decode path; see BatchPointEncoderDecoder.decode.
    return nps.decode(codes, grid_coord)

if __name__ == "__main__":
    # Smoke test: 2 batches of 3 points each, each batch split into 2 blocks.
    demo_points = torch.tensor(
        [
            [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],           # batch 0
            [10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0],  # batch 1
        ],
        device='cuda',
    )
    demo_batches = torch.tensor([0, 0, 0, 1, 1, 1], device='cuda')
    demo_grid = demo_points.clone()

    # The block count is fixed at construction time.
    codec = BatchPointEncoderDecoder(max_points_per_batch=204800, num_blocks=2)

    # Encode, then round-trip through decode and dump everything.
    demo_codes = codec.encode(demo_points, demo_batches)
    print("编码结果:")
    print(demo_codes)

    xyz, b, order, offsets = codec.decode(demo_codes, demo_grid, if_test=True)
    print("解码后的坐标:")
    print(xyz)
    print("解码后的batch编号:")
    print(b)
    print("采样顺序:")
    print(order)
    print("原始索引:")
    print(offsets)