import numpy as np
import math

import torch
from torch import nn
from einops import repeat, rearrange, reduce

from mmcv.ops.points_sampler import get_sampler_cls
from mmcv.ops import knn

from .basic_blocks import SIRENEmbed, TableEmbed


class SuperTokenInitializer(nn.Module):
    """Base class for super token initializers.

    Subclasses implement ``forward`` to build super tokens from packed point
    positions/features. A class-level dict (``GLB_STATES``) is shared by all
    instances so sibling initializers can exchange cached state.
    """

    GLB_STATES = {}

    def __init__(self):
        super().__init__()
        # per-instance scratch storage, distinct from the shared GLB_STATES
        self.states = {}

    def forward(self, positions, features, offsets):
        """initialize super token
        """
        pass

    def clr_glb_state(self):
        # Rebind to a fresh dict (rather than clearing in place).
        SuperTokenInitializer.GLB_STATES = {}

    def store_glb_state(self, key, val):
        SuperTokenInitializer.GLB_STATES[key] = val

    def get_glb_state(self, key, default=None):
        # Without an explicit default, a missing key is an error.
        shared = SuperTokenInitializer.GLB_STATES
        if key in shared:
            return shared[key]
        if default is None:
            existing_keys = list(shared.keys())
            raise ValueError(f"given key [{key}] is not in GLB_STATES with keys [{existing_keys}]")
        return default


class SuperTokenInitializerFPS(SuperTokenInitializer):
    """FPS Sample S positions as super token"""

    def __init__(self,
        sample_mode,
        num_super_token,
        d_embed,
        n_dim,
        num_associate,
        pe_cfg=None,
        **kwargs
    ):
        """
        Args:
            sample_mode: name passed to mmcv's ``get_sampler_cls`` (e.g. FPS variant).
            num_super_token: S, number of super tokens sampled per batch element.
            d_embed: embedding width used for the optional positional embedding.
            n_dim: spatial dimensionality D of the point positions.
            num_associate: A, number of nearest super tokens linked to each point.
            pe_cfg: if given, a SIREN positional embedding produces super token
                features from positions; otherwise features start as zeros.
        """
        super().__init__()
        self.sampler = get_sampler_cls(sample_mode)()
        self.num_super_token = num_super_token
        self.num_associate = num_associate
        if pe_cfg is not None:
            self.pe = SIRENEmbed(d_embed, n_dim, d_embed, **pe_cfg)

    def forward(self, positions, features, offsets):
        """Sample S super tokens per batch element and associate points to them.

        Args:
            positions: N x D packed point coordinates for the whole batch.
            features: N x C packed point features.
            offsets: per-batch-element end indices into the packed N dimension.

        Returns:
            dict with:
                asso: (A*N) x 2 [point_id, super_token_id] pairs (global ids).
                st_feat: super token features (see note below on shape).
                st_pos: (B*S) x D super token positions.
                num_sts: list of S repeated B times.
        """
        # constants
        S = self.num_super_token
        A = self.num_associate
        C = features.shape[1]
        N, D = positions.shape
        B = len(offsets)
        device = positions.device
        # generate associations
        asso_src_ids = []
        asso_dst_ids = []
        st_pos = []
        batch_start = 0
        for b, batch_end in enumerate(offsets):
            # random permutation decorrelates the sampler's seed from input order
            perm = torch.randperm(batch_end - batch_start, device=device)
            pos = positions[batch_start:batch_end].unsqueeze(0)
            feats = features[batch_start:batch_end].unsqueeze(0).permute(0, 2, 1)
            pos_perm = pos[:, perm].contiguous()
            feats_perm = feats[:, :, perm].contiguous()
            st_ids = self.sampler(pos_perm, feats_perm, S).long()                  # 1 x S
            center_pos = pos_perm.gather(1, repeat(st_ids, "b s -> b s d", d=D))   # 1 x S x D
            st_pos.append(center_pos[0])
            # for every point, find its A nearest super-token centers
            st_ids = knn(A, center_pos.float(), pos.float()).long()                # 1 x A x N
            # shift to global super-token ids (each batch element owns S tokens)
            st_ids = st_ids + b * S
            asso_dst_ids.append(st_ids[0])
            pt_ids = torch.arange(batch_end - batch_start, device=device).long()
            pt_ids = repeat(pt_ids + batch_start, "n -> a n", a=A)
            asso_src_ids.append(pt_ids)
            batch_start = batch_end
        asso_src_ids = rearrange(torch.cat(asso_src_ids, dim=1), "a n -> (a n)")    # A * N
        asso_dst_ids = rearrange(torch.cat(asso_dst_ids, dim=1), "a n -> (a n)")    # A * N
        asso = torch.stack([asso_src_ids, asso_dst_ids], dim=-1)                    # A * N x 2
        st_pos = torch.cat(st_pos, dim=0)                                           # B * S x D
        # generate super token features
        if hasattr(self, "pe"):
            st_feat = self.pe(st_pos)
        else:
            # NOTE(review): this branch returns a B x S x C tensor while the pe
            # branch returns a flat (B*S) x d_embed tensor — shape and width
            # differ; confirm downstream consumers handle both layouts.
            st_feat = torch.zeros(B, S, C, device=device)
        return dict(
            asso=asso,
            st_feat=st_feat,
            st_pos=st_pos,
            num_sts=[S] * B,
        )
    

class KNNAssociate:
    """Associate every point with its k nearest super tokens (KNN search)."""

    def __init__(self, num_associate, *args, **kwargs):
        # number of nearest super tokens linked to each point
        self.num_associate = num_associate

    def __call__(self, info):
        """Return an (A*P) x 2 tensor of [point_id, super_token_id] pairs.

        ``info`` carries the per-batch slices (``st_pos`` S x D, ``pt_pos``
        P x D) together with the global id offsets of each slice.
        """
        num_nbr = self.num_associate
        pt_pos = info["pt_pos"]     # P x D point coordinates
        st_pos = info["st_pos"]     # S x D super token coordinates
        num_pts = pt_pos.shape[0]
        # global point ids, tiled once per neighbour slot: (A*P,)
        point_ids = torch.arange(num_pts, device=pt_pos.device).long() + info["pt_offset"]
        point_ids = point_ids.repeat(num_nbr)
        # knn produces a 1 x A x P index tensor into st_pos for each point
        nbr_ids = knn(num_nbr, st_pos.unsqueeze(0), pt_pos.unsqueeze(0))
        nbr_ids = (nbr_ids[0].long() + info["st_offset"]).reshape(-1)
        return torch.stack([point_ids, nbr_ids], dim=-1)
    
    
class GridAssociate:
    """Associate each point with every super token whose center lies within an
    axis-aligned box of ``grid_range`` grid cells around the point."""

    def __init__(self, grid_range, *args, **kwargs):
        assert grid_range >= 1
        self.grid_range = grid_range    # multiple of grid size

    def __call__(self, info):
        """Return an M x 2 tensor of [point_id, super_token_id] pairs."""
        st_pos = info["st_pos"]     # S x D
        pt_pos = info["pt_pos"]     # P x D
        num_st = st_pos.shape[0]
        # half-extent of the search box around each super token, per axis
        search_extent = info["grid_extent"] * self.grid_range
        # pairwise per-axis absolute offsets: P x S x D
        offset = (pt_pos[:, None, :] - st_pos[None, :, :]).abs()
        # a (point, token) pair matches when inside the box on every axis
        inside = (offset < search_extent).all(dim=-1)
        flat_ids = inside.reshape(-1).nonzero(as_tuple=True)[0]
        pt_ids = flat_ids // num_st + info["pt_offset"]
        st_ids = flat_ids % num_st + info["st_offset"]
        return torch.stack([pt_ids, st_ids], dim=-1)


def get_pooling_asso(cfg):
    """Factory: build a pooling association strategy from a config dict.

    Args:
        cfg (dict): must contain ``type`` ("knn" or "grid"); the whole dict is
            forwarded to the constructor (extra keys are absorbed by **kwargs).

    Returns:
        KNNAssociate | GridAssociate: the configured association callable.

    Raises:
        NotImplementedError: if ``cfg["type"]`` is not a known strategy.
    """
    name = cfg["type"]
    if name == "knn":
        return KNNAssociate(**cfg)
    elif name == "grid":
        return GridAssociate(**cfg)
    # previously fell through and returned None, deferring the failure to the
    # first call site; fail fast with an explicit error instead
    raise NotImplementedError(f"Unknown pooling association type {name}")

class SuperTokenInitializerPooling(SuperTokenInitializer):
    """Use SparsePooling to generate super token"""
    def __init__(
        self,
        grid_size,
        d_embed,
        n_dim,
        num_associate,
        custom_asso=None,   # defaults to KNN based asso
        pe_cfg=dict(num_layers=2, temperature=1),
        rel_bias_cfg=None,
        keep_empty=False,
        force_square_grid=False,
        **kwargs
    ):
        """
        Args:
            grid_size: cells per axis; an int is broadcast to all ``n_dim``
                axes (must be an int when ``force_square_grid`` is set).
            d_embed: embedding width for the SIREN positional embedding.
            n_dim: spatial dimensionality D of the point positions.
            num_associate: A, neighbours per point for the default KNN asso.
            custom_asso: optional config dict for ``get_pooling_asso``;
                when None a KNN-based association is used.
            pe_cfg: extra kwargs for the SIREN positional embedding.
            rel_bias_cfg: if given, a TableEmbed produces a relative bias
                per (point, super token) association.
            keep_empty: keep all grid cells as super tokens, even empty ones.
            force_square_grid: build cubic cells per batch element from the
                largest axis extent instead of a fixed per-axis grid.
        """
        super().__init__()
        if custom_asso is None:
            self.asso = KNNAssociate(num_associate)
            # heuristic search radius in cells: A neighbours viewed as a cube
            # of side A^(1/3) — presumably tuned for 3D; TODO confirm
            self.asso_range = num_associate ** (1 / 3)
        else:
            self.asso = get_pooling_asso(custom_asso)
            self.asso_range = custom_asso.get("grid_range", 1)
        # grid size
        self.force_square_grid = force_square_grid
        if force_square_grid:
            assert isinstance(grid_size, int)
            self.grid_size = grid_size
        else:
            if isinstance(grid_size, int):
                grid_size = [grid_size] * n_dim
            else:
                grid_size = list(grid_size)
                assert len(grid_size) == n_dim
            self.grid_size = nn.Parameter(torch.tensor(grid_size).float(), requires_grad=False)
            # row-major strides to flatten a D-dim cell index into a scalar id
            dim_offset = np.cumprod([1] + grid_size[::-1])[:-1][::-1].tolist()
            self.dim_offset = nn.Parameter(torch.tensor(dim_offset).float(), requires_grad=False)
            # base grid center position (normalized to 0-1)
            grid = torch.stack(torch.meshgrid(
                *[torch.arange(s) for s in self.grid_size]
            ), dim=-1).view(-1, n_dim)
            self.grid_ind = nn.Parameter(grid, requires_grad=False)
            grid = (grid + 0.5) / self.grid_size
            self.grid = nn.Parameter(grid, requires_grad=False)
        # pos embed
        self.pe = SIRENEmbed(d_embed, n_dim, d_embed, **pe_cfg)
        # rel bias (if needed)
        self.need_rel_bias = rel_bias_cfg is not None
        if self.need_rel_bias:
            self.rel_bias = TableEmbed(**rel_bias_cfg)
        # keep empty
        self.keep_empty = keep_empty

    def forward(self, positions, features, offsets):
        """Build super tokens from a per-batch grid over the point cloud.

        Args:
            positions: N x D packed point coordinates for the whole batch.
            features: N x C packed point features (unused here).
            offsets: per-batch-element end indices into the packed N dimension.

        Returns:
            dict with flat associations (``asso``), super token positions /
            features, per-batch counts and grid metadata; ``rel_bias`` is
            included when configured.
        """
        # gather super tokens
        batch_pos = []
        batch_st_ids = []
        batch_num_st = []
        batch_st_pos = []
        batch_st_inds = []
        batch_asso = []
        batch_rel_bias = []
        batch_grid_extent = []
        batch_start = 0
        st_start = 0
        for b, batch_end in enumerate(offsets):
            # pos to grid id
            pt_pos = positions[batch_start:batch_end]
            # axis-aligned bounding box of this batch element's points
            grid_start = pt_pos.min(dim=0, keepdim=True).values
            grid_range = pt_pos.max(dim=0, keepdim=True).values - grid_start
            if self.force_square_grid:
                assert self.keep_empty
                n_dim = grid_range.shape[-1]
                # cubic cells: half-extent derived from the largest axis
                grid_extent = grid_range.max() / self.grid_size * 0.5
                grid_size = torch.round(grid_range / grid_extent * 0.5).clamp(min=1).long()[0]
                # cell centers, laid out symmetrically around the bbox center
                st_pos = torch.stack(torch.meshgrid(
                    *[torch.arange(s, device=grid_size.device) for s in grid_size]
                ), dim=-1).view(-1, n_dim) - (grid_size.unsqueeze(0) - 1) / 2
                st_pos = st_pos * grid_extent * 2 + (grid_start + grid_range * 0.5)
                num_st = st_pos.shape[0]
                unique_ids = None
                st_inds = None
            else:
                # per-axis half-extent of one grid cell
                grid_extent = grid_range / self.grid_size * 0.5
                if self.keep_empty:
                    # every precomputed cell center becomes a super token
                    st_pos = self.grid * grid_range + grid_start
                    st_inds = self.grid_ind.int()
                    num_st = self.grid.shape[0]
                    unique_ids = torch.arange(num_st, device=pt_pos.device)
                else:
                    # clamp keeps boundary points from indexing one cell past
                    # the grid (index == grid_size)
                    grid_pos = ((pt_pos - grid_start) / grid_range).clamp(max=1-1e-6)
                    grid_id = (grid_pos * self.grid_size).floor().long()
                    grid_id = (grid_id * self.dim_offset.long()).sum(dim=-1)
                    # get unique grid ids
                    unique_ids = torch.unique(grid_id)
                    num_st = unique_ids.shape[0]
                    # gather super token positions and grid indices
                    st_pos = self.grid[unique_ids] * grid_range + grid_start
                    st_inds = self.grid_ind[unique_ids].int()
            # build associations
            asso_info = dict(
                st_pos=st_pos, pt_pos=pt_pos,
                st_offset=st_start, pt_offset=batch_start,
                grid_extent=grid_extent,
            )
            asso = self.asso(asso_info)
            # calc relative pe
            if self.need_rel_bias:
                # expand positions along associations (ids are global, so the
                # per-batch offsets are subtracted back out)
                pt_pos_expand = pt_pos[asso[:, 0] - batch_start]
                st_pos_expand = st_pos[asso[:, 1] - st_start]
                # normalize relative offsets by the search radius for the table
                def mapping(x):
                    return (x / grid_extent) / self.asso_range
                self.rel_bias.input_mapping = mapping
                rel_bias = self.rel_bias(st_pos_expand - pt_pos_expand)
                self.rel_bias.input_mapping = TableEmbed.IDENTITY_MAPPING
                batch_rel_bias.append(rel_bias)
            # record new start offset and write results
            batch_start = batch_end
            st_start += num_st
            batch_pos.append(pt_pos)
            batch_num_st.append(num_st)
            batch_st_pos.append(st_pos)
            batch_st_ids.append(unique_ids)
            batch_st_inds.append(st_inds)
            batch_asso.append(asso)
            batch_grid_extent.append(grid_extent)
        asso = torch.cat(batch_asso, dim=0)
        st_pos = torch.cat(batch_st_pos, dim=0)
        # super token features come solely from the positional embedding
        st_feat = self.pe(st_pos)
        info = dict(
            asso=asso,
            st_feat=st_feat,
            st_pos=st_pos,
            num_sts=batch_num_st,
            st_ids=batch_st_ids,
            st_inds=batch_st_inds,
            grid_extent=batch_grid_extent,
        )
        if not self.force_square_grid:
            info.update(
                grid_size=self.grid_size.tolist(),
            )
        if self.need_rel_bias:
            info["rel_bias"] = torch.cat(batch_rel_bias, dim=0)
        return info
        

class SuperTokenInitializerPoolingCached(SuperTokenInitializer):
    """Use SparsePooling to generate super token"""

    def __init__(
        self,
        layer_idx,
        block_idx,
        grid_size,
        d_embed,
        n_dim,
        num_associate,
        custom_asso=None,   # defaults to KNN based asso
        pe_cfg=dict(num_layers=2, temperature=1),
        rel_bias_cfg=None,
        keep_empty=False,
        force_square_grid=False,
        **kwargs
    ):
        """Cached variant: only layer 0 / block 0 actually builds super tokens
        (via ``SuperTokenInitializerPooling``); later layers/blocks reuse the
        result stored in the shared GLB_STATES and only refresh associations
        and features.

        Args:
            layer_idx: index of the owning layer; 0 marks the builder.
            block_idx: index of the owning block within the layer; block 0 of
                each later layer recomputes associations for the new points.
            (remaining arguments are forwarded to SuperTokenInitializerPooling;
            see that class for their meaning)
        """
        super().__init__()
        if custom_asso is None:
            self.asso = KNNAssociate(num_associate)
            # heuristic search radius in cells: A neighbours viewed as a cube
            # of side A^(1/3) — presumably tuned for 3D; TODO confirm
            self.asso_range = num_associate ** (1 / 3)
        else:
            self.asso = get_pooling_asso(custom_asso)
            self.asso_range = custom_asso.get("grid_range", 1)
        self.layer_idx = layer_idx
        self.block_idx = block_idx
        self.num_associate = num_associate
        # only the very first initializer builds (and caches) the super tokens
        self.is_first = layer_idx == 0 and block_idx == 0
        if self.is_first:
            self._initializer = SuperTokenInitializerPooling(
                grid_size, d_embed, n_dim, num_associate, custom_asso, pe_cfg, rel_bias_cfg, keep_empty, force_square_grid
            )
            # NOTE: wipes GLB_STATES for the whole class hierarchy; relies on
            # layer-0/block-0 being constructed before all later instances
            self.clr_glb_state()
        # detect an embedding-width change relative to the previous instance
        d_last = self.get_glb_state(f"d_last", d_embed)
        self.store_glb_state(f"d_last", d_embed)
        self.upsample = d_last != d_embed
        if self.upsample:
            # project cached features to the new width and add a fresh pos embed
            self.upsample_pe = SIRENEmbed(d_embed, n_dim, d_embed, **pe_cfg)
            self.upsample_feat = nn.Sequential(
                nn.Linear(d_last, d_embed),
                nn.GELU(),
                nn.BatchNorm1d(d_embed),
                nn.Linear(d_embed, d_embed),
                nn.GELU(),
            )
        self.need_rel_bias = rel_bias_cfg is not None
        # only block 0 of non-first layers owns a rel-bias table (the first
        # layer's table lives inside self._initializer)
        if self.need_rel_bias and not self.is_first and block_idx == 0:
            self.rel_bias = TableEmbed(**rel_bias_cfg)
        if not self.is_first:
            if custom_asso is None:
                self.asso = KNNAssociate(num_associate)
            else:
                self.asso = get_pooling_asso(custom_asso)

    def forward(self, positions, features, offsets):
        """Return the (possibly refreshed) cached super token info dict.

        First instance: run the real initializer and cache its output.
        Later instances: reuse the cached dict; block 0 of each layer rebuilds
        the point↔super-token associations (positions may have changed), and
        a width change triggers feature upsampling.
        """
        if self.is_first:
            info = self._initializer(positions, features, offsets)
            self.store_glb_state("st_info", info)
        else:
            info = self.get_glb_state("st_info")
            # update associations
            if self.block_idx == 0:
                device = positions.device
                batch_asso = []
                batch_rel_bias = []
                batch_start = 0
                st_start = 0
                for b, (batch_end, num_st) in enumerate(zip(offsets, info["num_sts"])):
                    st_end = st_start + num_st
                    st_pos = info["st_pos"][st_start:st_end]
                    pt_pos = positions[batch_start:batch_end]
                    asso_info = dict(
                        st_pos=st_pos, pt_pos=pt_pos,
                        st_offset=st_start, pt_offset=batch_start,
                        grid_extent=info["grid_extent"][b],
                    )
                    asso = self.asso(asso_info)
                    # calc relative pe
                    if self.need_rel_bias:
                        # ids in asso are global; subtract offsets to index the
                        # per-batch slices
                        st_pos_expand = st_pos[asso[:, 1] - st_start]
                        pt_pos_expand = pt_pos[asso[:, 0] - batch_start]
                        # normalize offsets by this batch's search radius
                        def mapping(x):
                            return (x / info["grid_extent"][b]) / self.asso_range
                        self.rel_bias.input_mapping = mapping
                        rel_bias = self.rel_bias(st_pos_expand - pt_pos_expand)
                        self.rel_bias.input_mapping = TableEmbed.IDENTITY_MAPPING
                        batch_rel_bias.append(rel_bias)
                    batch_asso.append(asso)
                    batch_start = batch_end
                    st_start = st_end
                asso = torch.cat(batch_asso, dim=0)
                info["asso"] = asso
                if self.need_rel_bias:
                    info["rel_bias"] = torch.cat(batch_rel_bias, dim=0)
            # update st_feat
            # NOTE(review): reads "st_feat" from GLB_STATES, which no code in
            # this file stores — presumably written by the transformer blocks
            # between layers; verify against the caller.
            st_feat = self.get_glb_state("st_feat")
            if self.upsample:
                st_feat = self.upsample_pe(info["st_pos"]) + self.upsample_feat(st_feat)
            info["st_feat"] = st_feat
            self.store_glb_state("st_info", info)
        return info


def get_super_token_initializer(cfg) -> SuperTokenInitializer:
    """Factory: build a super token initializer from a config dict.

    ``cfg["mode"]`` selects the implementation; the full dict is forwarded to
    the constructor (each class absorbs unused keys via ``**kwargs``).
    """
    mode = cfg["mode"]
    if mode == "fps":
        initializer_cls = SuperTokenInitializerFPS
    elif mode == "pooling":
        initializer_cls = SuperTokenInitializerPooling
    elif mode == "pooling cached":
        initializer_cls = SuperTokenInitializerPoolingCached
    else:
        raise NotImplementedError(f"Unknown super token initializer mode {mode}")
    return initializer_cls(**cfg)