import math

import numpy as np

import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.checkpoint import checkpoint
from einops import repeat, rearrange, reduce

from mmdet3d.models.layers.pointnet_modules.builder import SA_MODULES
from mmdet3d.models.layers.pointnet_modules import PointSAModule

# Maximum allowed per-query spread of similarity logits before softmax; logits
# whose (max - min) range exceeds this are rescaled down to keep exp() stable.
MAX_SIM_RANGE = 20

class SparseAttention(nn.Module):
    """compute sparsely connected attention"""
    def __init__(
        self,
        num_ch,
        num_heads = 1.0,
        drop_sim = 0.1,
        drop_path = 0.3,
        reduce_query = "none",
        with_proj = True,
        same_kv = True,
        residual = True,
        norm_loc = "pre",
        sim_scale = 1.0,
        enable_checkpoint = False,
        norm_after_proj = False,
    ):
        super().__init__()
        self.enable_checkpoint = enable_checkpoint
        self.num_heads = num_heads
        self.drop_sim = drop_sim
        self.drop_path = drop_path
        self.reduce_query = reduce_query
        self.with_proj = with_proj
        self.same_kv = same_kv
        self.residual = residual
        self.norm_loc = norm_loc
        self.sim_scale = sim_scale / math.sqrt(num_ch)
        if self.with_proj:
            def build_proj():
                if norm_after_proj:
                    return nn.Sequential(nn.Linear(num_ch, num_ch), nn.BatchNorm1d(num_ch))
                else:
                    return nn.Linear(num_ch, num_ch)
            self.proj_q = build_proj()
            self.proj_k = build_proj()
            self.proj_v = build_proj() if not self.same_kv else None
            self.proj_o = build_proj()
        else:
            self.proj_q = self.proj_k = self.proj_v = self.proj_o = nn.Identity()
        if residual:
            self.norm = nn.LayerNorm(num_ch)
        else:
            self.norm_loc = "none"

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, associations, bias=None):
        """compute sparsely connected attention
        
        Args:
            q (torch.Tensor): query of dim Q x C
            k (torch.Tensor): key of dim K x C 
            v (torch.Tensor): value of dim K x C (same as key)
            associations (torch.Tensor): describe KV -> Q connections of dim N x 2
            bias (torch.Tensor): bias of dim N x H
            num_heads (int, optional): number of heads. Defaults to 1
            drop_sim (float, optional): dropout rate. Defaults to 0
            init_query (str, optional): reduce mode for query initialization.
    
        Returns:
            out (torch.Tensor): output of dim Q x C
            weights (torch.Tensor): normalized weights of dim N x num_heads
        """
        if self.enable_checkpoint:
            return checkpoint(self._forward, q, k, v, associations, bias)
        else:
            return self._forward(q, k, v, associations, bias)
        
    def _forward(self, q, k, v, associations, bias):
        """real forward function"""
        Q, C = q.shape
        KV = k.shape[0]
        H = self.num_heads
        D = C // H
        N = associations.shape[0]
        
        # prepare residual and norm
        residual = q
        if self.norm_loc == "pre":
            q = self.norm(q)
        
        # project
        q = self.proj_q(q)
        k = self.proj_k(k)
        v = self.proj_v(v) if not self.same_kv else k
        
        # generate association ids
        src_ids = repeat(associations[..., 0].long(), "n -> n c", c=C)
        dst_ids = repeat(associations[..., 1].long(), "n -> n c", c=C)
        
        # expand kv
        k_expand = k.gather(0, src_ids)
        v_expand = v.gather(0, src_ids) if not self.same_kv else k_expand
        
        # get q by pooling
        if self.reduce_query != "none":
            q = q + q.scatter_reduce(0, dst_ids, k_expand, self.reduce_query, include_self=False)
        q_expand = q.gather(0, dst_ids)
        
        # convert to head based
        q_expand = rearrange(q_expand, "n (h d) -> n h d", h=H)
        k_expand = rearrange(k_expand, "n (h d) -> n h d", h=H)
        v_expand = rearrange(v_expand, "n (h d) -> n h d", h=H)
        
        # compute sim
        sim = (q_expand * k_expand).sum(dim=-1)
        if bias is not None:
            sim = sim + bias
        sim = sim * self.sim_scale
        
        # normalize for each query
        max_sim = torch.zeros(Q, H, device=sim.device, dtype=sim.dtype)
        min_sim = torch.zeros(Q, H, device=sim.device, dtype=sim.dtype)
        max_sim = max_sim.scatter_reduce(0, dst_ids[..., :H], sim, "amax", include_self=False)
        min_sim = - min_sim.scatter_reduce(0, dst_ids[..., :H], -sim, "amax", include_self=False)
        scale = (max_sim - min_sim).clamp(min=MAX_SIM_RANGE) / MAX_SIM_RANGE
        scale = scale.gather(0, dst_ids[..., :H])
        center = (max_sim + min_sim) / 2
        center = center.gather(0, dst_ids[..., :H])
        sim = (sim - center) / scale
        
        # apply dropout
        if self.drop_sim > 0 and self.training:
            sim = sim - (torch.rand_like(sim) < self.drop_sim) * 1e12
        
        # softmax
        sim = sim.exp()
        weights = torch.zeros(Q, H, device=sim.device, dtype=sim.dtype)
        weights = weights.scatter_add(0, dst_ids[..., :H], sim) + 1e-8
        weights = sim / weights.gather(0, dst_ids[..., :H])    
        
        # matmul
        v_expand = v_expand * weights.unsqueeze(-1)
        v_expand = rearrange(v_expand, "n h d -> n (h d)")
        out = torch.zeros(Q, C, device=v_expand.device, dtype=v_expand.dtype)
        out = out.scatter_add(0, dst_ids, v_expand)
        
        # out proj and drop path
        out = self.proj_o(out)
        if self.drop_path > 0 and self.training:
            mask = torch.rand_like(out[..., [0]]) > self.drop_path
            out = out * mask
        
        # residual and norm
        if self.residual:
            out = out + residual
        if self.norm_loc == "post":
            out = self.norm(out)
        
        return out, weights