import math
import numpy as np

import torch

from mmcv.ops.points_sampler import get_sampler_cls

from mmcv.ops import knn


@torch.no_grad()
def sample_grid(
    token_positions: torch.Tensor,
    batch_token_ids,
    num_super_tokens_per_batch: int,
):
    """
    sample super tokens on a fixed angular grid shared by every batch

    Args:
        token_positions (torch.Tensor): [N, D] token positions
            (presumably angular coordinates in degrees — TODO confirm)
        batch_token_ids (list[torch.Tensor]): token indices for each batch
        num_super_tokens_per_batch (int): number of super tokens per batch;
            must be a perfect square so it can form a square grid

    Returns:
        super_token_positions (torch.Tensor): [M, 3] grid positions,
            repeated once per batch
        super_token_src (torch.Tensor): [N] super token index of each token
        batch_super_token_ids (list[torch.Tensor]): list[[M_0], ..., [M_B-1]]
    """
    device = token_positions.device

    assert math.sqrt(num_super_tokens_per_batch).is_integer()
    sqrt_num = int(math.sqrt(num_super_tokens_per_batch))

    # evenly spaced angles in (0, 360); drop the 0-degree sample
    angles = np.linspace(0, 360, sqrt_num + 1, endpoint=False)[1:]
    X, Y = np.meshgrid(angles, angles)
    st_pos_batch = np.concatenate(
        [np.zeros_like(X).reshape(-1, 1), Y.reshape(-1, 1), X.reshape(-1, 1)],
        axis=-1)
    # the grid is identical for every batch: convert to a tensor once
    # instead of once per batch (loop-invariant hoisting)
    st_pos_batch_torch = torch.tensor(
        st_pos_batch, device=device).float().unsqueeze(0)

    num_st = 0
    batch_st_ids = []
    batch_st_pos = []
    st_src = torch.zeros_like(token_positions[:, 0]).long()

    for inds in batch_token_ids:
        pos = token_positions[inds].float().unsqueeze(0)
        # assign each token to its nearest grid super token
        st_inds = knn(1, st_pos_batch_torch, pos)[0, 0].long()  # [N_b]
        # write data, offsetting into the global super-token id space
        st_src[inds] = st_inds + num_st

        batch_st_ids.append(torch.arange(
            num_super_tokens_per_batch, device=device) + num_st)
        num_st += num_super_tokens_per_batch
        batch_st_pos.append(st_pos_batch_torch.squeeze(0))

    return torch.cat(batch_st_pos, dim=0), st_src, batch_st_ids

# TODO: include feature / occupancy here?

@torch.no_grad()
def sample_pooling(
    token_positions: torch.Tensor,
    batch_token_ids,
    kernel_size,
    kernel_offset,
    spatial_shape):
    """
    sample super token by pooling

    Args:
        token_positions (torch.Tensor): [N, D] in voxel / pixel coordinates
        batch_token_ids (list[torch.Tensor]): token indices for each batch
        kernel_size (int | tuple[int]): pooling kernel size per spatial dim
        kernel_offset (int | tuple[int]): offset added before pooling
        spatial_shape (tuple[int] | torch.Tensor): spatial shape

    Returns:
        super_token_positions (torch.Tensor): [M, D] mean position of the
            tokens pooled into each super token
        super_token_src (torch.Tensor): [N] super token index of each token
        batch_super_token_ids (torch.Tensor): list[[M_0], ..., [M_B-1]]
    """
    # constants
    D = token_positions.shape[-1]
    device = token_positions.device
    # expand scalar parameters / compute intermediate variables
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size] * len(spatial_shape)
    if isinstance(kernel_offset, int):
        kernel_offset = [kernel_offset] * len(spatial_shape)
    kernel_size = torch.tensor(kernel_size, dtype=torch.long, device=device)
    kernel_offset = torch.tensor(kernel_offset, dtype=torch.long, device=device)
    # as_tensor accepts tuple / list / ndarray / tensor; the previous
    # unconditional `.tolist()` crashed on the tuple the docstring allows
    spatial_shape = torch.as_tensor(spatial_shape, device=device).long()
    # pooled grid size, and the per-axis stride that flattens a D-dim cell
    # coordinate into a single linear id (kept integer to avoid float ids)
    out_spatial_shape = torch.ceil(
        (spatial_shape + kernel_offset) / kernel_size).long().tolist()
    out_id_scale = torch.from_numpy(
        np.cumprod([1] + out_spatial_shape[:-1])).to(device)
    # compute super token info
    num_st = 0
    batch_st_ids = []
    st_src = torch.zeros_like(token_positions[:, 0]).long()
    for inds in batch_token_ids:
        # pool token positions into flat cell ids
        pos = token_positions[inds] + kernel_offset
        pos = torch.floor(pos / kernel_size).long()
        pos = (pos * out_id_scale).sum(dim=1)
        # one super token per occupied cell (counts were unused; dropped)
        cell_ids, st_inds = torch.unique(pos, return_inverse=True)
        # write data, offsetting into the global super-token id space
        st_src[inds] = st_inds + num_st
        batch_st_ids.append(torch.arange(len(cell_ids), device=device) + num_st)
        num_st += cell_ids.shape[0]
    # super token position = mean position of its member tokens
    st_pos = torch.zeros(num_st, D, dtype=torch.float32, device=device)
    st_pos.index_reduce_(0, st_src, token_positions.float(), 'mean', include_self=False)
    return st_pos, st_src, batch_st_ids


@torch.no_grad()
def sample_fps(
    token_positions: torch.Tensor,
    batch_token_ids,
    num_super_tokens_per_batch: int,
    fps_mode: str = "D-FPS",
    shuffule: bool = True,
):
    """
    sample super token by furthest point sampling and nearest neighbor search

    Args:
        token_positions (torch.Tensor): [N, D] in voxel / pixel coordinates
        batch_token_ids (list[torch.Tensor]): token indices for each batch
        num_super_tokens_per_batch (int): number of super tokens per batch
        fps_mode (str): sampler name resolved via ``get_sampler_cls``
        shuffule (bool): whether to shuffle the tokens before sampling to
            produce different results

    Returns:
        super_token_positions (torch.Tensor): [M, D]
        super_token_src (torch.Tensor): [N]
        batch_super_token_ids (torch.Tensor): list[[M_0], ..., [M_B-1]]
    """
    dim = token_positions.shape[-1]
    device = token_positions.device
    sampler = get_sampler_cls(fps_mode)()
    # per-batch ordering: random permutation, or identity when not shuffling
    make_order = torch.randperm if shuffule else torch.arange
    orders = [make_order(len(ids), device=device) for ids in batch_token_ids]
    # compute st for each batch
    offset = 0
    batch_st_ids = []
    st_src = torch.zeros_like(token_positions[:, 0], dtype=torch.long)
    for order, ids in zip(orders, batch_token_ids):
        ids = ids[order]
        pos = token_positions[ids].float().unsqueeze(0)       # [1 x N x D]
        feat = torch.zeros_like(pos[..., [0]])                # [1 x N x 1]
        # pick anchors by FPS, then assign each token to its closest anchor
        anchor_inds = sampler(pos, feat, num_super_tokens_per_batch)[0].long()  # [K]
        st_inds = knn(1, pos[:, anchor_inds], pos)[0, 0].long()                 # [N]
        # write data, offsetting into the global super-token id space
        st_src[ids] = st_inds + offset
        batch_st_ids.append(
            torch.arange(num_super_tokens_per_batch, device=device) + offset)
        offset += num_super_tokens_per_batch
    # super token position = mean position of its assigned tokens
    st_pos = torch.zeros(offset, dim, dtype=torch.float32, device=device)
    st_pos.index_reduce_(0, st_src, token_positions.float(), 'mean', include_self=False)
    return st_pos, st_src, batch_st_ids
    

# registry: sampling strategy name -> sampling function
SAMPLING_FNS = dict(
    pooling=sample_pooling,
    fps=sample_fps,
    grid=sample_grid,
)
