import math

import torch
from torch import nn
from torch.nn import functional as F
from einops import repeat, rearrange, reduce

from mmcv.ops.points_sampler import get_sampler_cls
from mmcv.ops import furthest_point_sample, furthest_point_sample_with_dist, knn

from mmdet3d.models.layers.pointnet_modules.builder import SA_MODULES
from mmdet3d.models.layers.pointnet_modules import PointSAModule

from .basic_blocks import FourierEmbed, RelativeEmbed, SIRENEmbed, TableEmbed

# Maximum allowed per-token spread of raw similarity logits. Before exp(),
# similarities whose (max - min) range exceeds this are rescaled/centered
# so the exponential cannot overflow (see the esim computations in forward).
MAX_SIM_RANGE = 50

class PointSuperTokenAttention(nn.Module):
    """Global point-cloud attention through dynamically sampled "super tokens".

    Per batch element, ``num_super_token`` centers are sampled from the point
    cloud; each point is associated with its ``num_associate`` nearest centers
    (via kNN). Point values are pooled onto the super tokens with
    similarity-based soft weights, multi-head self-attention runs among the
    (few) super tokens, and the attended features are scattered back to every
    point with a second softmax over each point's associated tokens.

    ``forward`` interface (all batches concatenated along dim 0):
        features:  (N, embed_ch) point features.
        positions: (N, D) point coordinates (D is typically 3).
        offsets:   length-B sequence of cumulative end indices;
                   ``features[offsets[b-1]:offsets[b]]`` belongs to sample b.
    Returns the updated (N, embed_ch) features.
    """

    def __init__(
        self,
        embed_ch,
        num_super_token,
        num_associate,
        sample_mode = "D-FPS",
        num_heads = 8,
        dropout = 0.1,
        sim_scale = 1,
        pe_temperature = 0.1,
        pc_range = (0, -40, -3, 70.4, 40, 1),  # (x0, y0, z0, x1, y1, z1); tuple avoids a mutable default
        relative_pe = False,
        super_token_pe = False,
        with_ffn = False,
        *args,
        **kwargs,
    ):
        super().__init__()
        self.channels = embed_ch
        self.num_super_token = num_super_token
        self.num_associate = num_associate
        self.num_heads = num_heads
        self.relative_pe = relative_pe
        self.super_token_pe = super_token_pe
        self.with_ffn = with_ffn
        self.dropout = dropout
        # fold the 1/sqrt(C) of scaled dot-product attention into the user scale
        self.sim_scale = sim_scale / math.sqrt(embed_ch)

        # constant geometry buffers; kept as non-trainable Parameters so they
        # follow .to(device) and state_dict handling
        self.pc_size = nn.Parameter(torch.tensor([pc_range[i+3] - pc_range[i] for i in range(3)]), requires_grad=False)
        self.pc_center = nn.Parameter(torch.tensor([(pc_range[i+3] + pc_range[i]) / 2 for i in range(3)]), requires_grad=False)
        self.pc_range = nn.Parameter(torch.tensor(pc_range), requires_grad=False)
        self.point_pe_scale = nn.Parameter(torch.tensor([0.1, 0.1, 0.3]), requires_grad=False)

        self.sampler = get_sampler_cls(sample_mode)()

        self.proj_q = nn.Linear(embed_ch, embed_ch)
        self.proj_k = nn.Linear(embed_ch, embed_ch)
        self.proj_v = nn.Linear(embed_ch, embed_ch)

        if self.relative_pe:
            # per-head relative positional bias from a learned 3D lookup table
            self.point_pe = TableEmbed(num_heads, [32, 32, 8], math.sqrt(embed_ch), 2, self.point_pe_mapping)
        else:
            # absolute Fourier positional embedding added to point features
            self.point_pe = FourierEmbed(embed_ch, 3, pe_temperature)

        if self.super_token_pe:
            if self.relative_pe:
                self.token_pe = TableEmbed(
                    num_heads, [32, 32, 8], math.sqrt(embed_ch), [i * 0.5 + 0.5 for i in range(num_heads)],
                    self.token_pe_mapping
                )
            else:
                self.token_pe = FourierEmbed(embed_ch, 3, pe_temperature)

        self.attn_norm = nn.LayerNorm(embed_ch)
        self.attn = nn.MultiheadAttention(embed_ch, num_heads, dropout=dropout, batch_first=True)

        if with_ffn:
            self.ffn = nn.Sequential(
                nn.LayerNorm(embed_ch),
                nn.Linear(embed_ch, embed_ch * 2),
                nn.GELU(),
                nn.Dropout(dropout),
                nn.Linear(embed_ch * 2, embed_ch),
            )

        self.out_norm = nn.LayerNorm(embed_ch)

        # when True, forward() stashes intermediates into debug_states
        self.debug = False
        self.debug_states = {}

    def point_pe_mapping(self, x):
        """Compress relative point offsets with a signed power law before table lookup."""
        x = x * self.point_pe_scale
        x = torch.sign(x) * torch.pow(torch.abs(x), 0.6)
        return x

    def token_pe_mapping(self, x):
        """Normalize token-to-token offsets by scene extent, then compress."""
        x = x / self.pc_size
        x = torch.sign(x) * torch.pow(torch.abs(x), 0.6)
        return x

    def forward(self, features, positions, offsets):
        """See the class docstring for shapes; returns updated (N, C) features."""
        N, D = positions.shape
        C = self.channels
        A = self.num_associate
        S = self.num_super_token
        H = self.num_heads
        M = C // H  # per-head channel width
        B = len(offsets)
        device = positions.device

        if self.debug:
            self.debug_states["input"] = (positions, features, offsets)

        # ---- association: for every point, the ids of its A nearest super tokens
        associations = []
        batch_start = 0
        for b, batch_end in enumerate(offsets):
            pos = positions[batch_start:batch_end].unsqueeze(0)
            feats = features[batch_start:batch_end].unsqueeze(0).permute(0, 2, 1)
            st_ids = self.sampler(pos, feats, S).long()                             # 1 x S
            center_pos = pos.gather(1, repeat(st_ids, "b s -> b s d", d=D))         # 1 x S x D
            asso_ids = knn(A, center_pos.float(), pos.float()).long()               # 1 x A x N
            asso_ids = asso_ids + b * S  # shift ids into the flattened (B*S) token space
            associations.append(asso_ids[0])
            batch_start = batch_end
        associations = rearrange(torch.cat(associations, dim=1), "a n -> (a n)")    # A * N

        # ---- positional embedding / pre-norm
        if not self.relative_pe:
            features = features + self.point_pe(positions)
        features = self.attn_norm(features)

        feat_q = self.proj_q(features)
        feat_k = self.proj_k(features)
        feat_v = self.proj_v(features)

        # ---- super token pooling
        st = torch.zeros(B * S, C, device=device, dtype=feat_q.dtype)
        q_expand = repeat(feat_q, "n f -> (a n) f", a=A)
        k_expand = repeat(feat_k, "n f -> (a n) f", a=A)
        v_expand = repeat(feat_v, "n f -> (a n) f", a=A)
        # initial token = mean of its associated queries; similarity = per-head
        # dot product between token and point keys (A * N x H)
        st = st.index_reduce(0, associations, q_expand, "mean", include_self=False)
        st_expand = st[associations]
        sim = (st_expand * k_expand) * self.sim_scale
        sim = reduce(sim, "x (h m) -> x h", "sum", h=H)
        if self.relative_pe:
            st_pos = torch.zeros(B * S, D, device=device, dtype=positions.dtype)
            t_pos_expand = repeat(positions, "n d -> (a n) d", a=A)
            st_pos = st_pos.index_reduce(0, associations, t_pos_expand, "mean", include_self=False)
            sim = sim + self.point_pe(st_pos[associations] - t_pos_expand)
        raw_sim = sim
        # similarity dropout: push random entries towards -inf so they vanish
        # after exp(). BUG FIX: gate on self.training — previously this also
        # ran at inference, making eval outputs nondeterministic.
        if self.training and self.dropout > 0:
            sim = sim - (torch.rand_like(sim) < self.dropout) * 1e12
        # avoid float overflow: clamp each token's similarity range to
        # MAX_SIM_RANGE and center it before exponentiation
        max_sim = torch.zeros(B * S, H, device=device, dtype=sim.dtype)
        max_sim = max_sim.index_reduce(0, associations, sim, "amax", include_self=False)
        min_sim = torch.zeros(B * S, H, device=device, dtype=sim.dtype)
        min_sim = -min_sim.index_reduce(0, associations, -sim, "amax", include_self=False)
        scale = (max_sim - min_sim).clamp(min=MAX_SIM_RANGE) / MAX_SIM_RANGE
        scale = scale[associations]
        center_sim = (max_sim + min_sim) / 2
        center_sim = center_sim[associations]
        esim = torch.exp((sim - center_sim) / scale)
        # softmax denominators per token (B * S x H); explicit dtype so
        # index_add also works under mixed precision (fix: was default fp32)
        weights = torch.full([B * S, H], 1e-8, device=device, dtype=esim.dtype)
        weights = weights.index_add(0, associations, esim)

        if self.debug:
            self.debug_states["associations"] = associations
            self.debug_states["raw_sim"] = raw_sim
            self.debug_states["sim"] = sim
            self.debug_states["const"] = (B, N, D, C, A, S)

        # ---- gather: similarity-weighted mean of values into each super token
        st = torch.zeros(B * S, H, M, device=device, dtype=esim.dtype)
        v_expand = rearrange(v_expand, "x (h m) -> x h m", h=H) * esim.unsqueeze(-1)
        st = st.index_add(0, associations, v_expand) / weights.unsqueeze(-1)
        st = rearrange(st, "(b s) h m -> b s (h m)", b=B)
        # ---- super token positional embedding
        mask = None
        if self.super_token_pe:
            st_pos = torch.zeros(B * S, D, device=device, dtype=positions.dtype)
            t_pos_expand = t_pos_expand if self.relative_pe else repeat(positions, "n d -> (a n) d", a=A)
            # similarity-weighted mean position of each super token
            st_pos = st_pos.index_add(0, associations, t_pos_expand * esim.sum(dim=-1, keepdim=True))
            st_pos = st_pos / weights.sum(dim=-1, keepdim=True)
            st_pos = rearrange(st_pos, "(b s) d -> b s d", b=B)
            if self.relative_pe:
                # pairwise token offsets become a per-head additive attention bias
                diff = st_pos.unsqueeze(1) - st_pos.unsqueeze(2)  # B 1 T D - B T 1 D -> B T T D
                bias = self.token_pe(diff)
                mask = rearrange(bias, "b t1 t2 h -> (b h) t1 t2")
            else:
                st = st + self.token_pe(st_pos)

        # ---- attention among the super tokens
        if not self.training and st.shape[0] == 1:
            # this is a bug related to MHA's fast path before 2.0.1
            # disable batching to disable fast path
            st = st.squeeze(0)
        st, attn = self.attn(st, st, st, attn_mask=mask)
        if not self.training and st.dim() == 2:
            # this is a bug related to MHA's fast path before 2.0.1
            st = st.unsqueeze(0)

        # ---- upsample: scatter attended tokens back to points with a fresh
        # softmax over each point's A associated tokens (same overflow guard)
        sim = rearrange(sim, "(a n) h -> a n h", a=A)
        max_sim = sim.max(dim=0, keepdim=True).values
        min_sim = sim.min(dim=0, keepdim=True).values
        scale = (max_sim - min_sim).clamp(min=MAX_SIM_RANGE) / MAX_SIM_RANGE
        center_sim = (max_sim + min_sim) / 2
        esim = torch.exp((sim - center_sim) / scale)
        weights = esim.sum(dim=0).unsqueeze(-1) + 1e-8
        esim = rearrange(esim, "a n h -> (a n) h 1")
        st_expand = rearrange(st, "b s (h m) -> (b s) h m", h=H)[associations]
        t = reduce(st_expand * esim, "(a n) h m -> n h m", "sum", a=A) / weights
        t = rearrange(t, "n h m -> n (h m)")

        # ---- residual and optional ffn
        features = features + t
        if self.with_ffn:
            features = features + self.ffn(features)

        return self.out_norm(features)