import numpy as np

import torch
from torch import nn
from einops import repeat, rearrange, reduce

from mmcv.ops.points_sampler import get_sampler_cls
from mmcv.ops import knn
from mmdet3d.models.layers import make_sparse_convmodule

from spconv.pytorch import SparseConvTensor

from .basic_blocks import SIRENEmbed, MultiHeadAttentionNoParam

class SuperTokenFuser(nn.Module):
    """Abstract base class for super-token fusers.

    Owns the optional feed-forward refinement (FFN) shared by all concrete
    fusers; subclasses implement :meth:`forward`.

    Args:
        d_embed (int): feature dimension C of the super tokens.
        dropout (float): dropout rate used inside the FFN.
        with_ffn (bool): if True, attach a 2x-expansion FFN + BatchNorm1d.
        norm_loc (str): "pre" or "post" — where ``ffn_norm`` is applied
            around the FFN residual in :meth:`process_ffn`.
    """

    def __init__(
        self,
        d_embed,
        dropout,
        with_ffn,
        norm_loc,
    ):
        super().__init__()
        self.norm_loc = norm_loc
        self.with_ffn = with_ffn
        if with_ffn:
            # Pointwise FFN with 2x hidden expansion.
            self.ffn = nn.Sequential(
                nn.Linear(d_embed, d_embed * 2),
                nn.GELU(),
                nn.Dropout(dropout),
                nn.Linear(d_embed * 2, d_embed)
            )
            self.ffn_norm = nn.BatchNorm1d(d_embed)

    def process_ffn(self, features):
        """Apply the optional FFN with a pre-/post-norm residual.

        Returns ``features`` unchanged when ``with_ffn`` is False or
        ``norm_loc`` is neither "pre" nor "post".
        """
        if self.with_ffn and self.norm_loc == "pre":
            return features + self.ffn(self.ffn_norm(features))
        if self.with_ffn and self.norm_loc == "post":
            return self.ffn_norm(features + self.ffn(features))
        return features

    def forward(self, features, info: dict, **kwargs):
        """Fuse super tokens; must be overridden by subclasses.

        Args:
            features (torch.Tensor): N x C
            info (dict): dictionary containing more info needed

        Returns:
            new_features (torch.Tensor): N x C
        """
        # Bug fix: the stub used to `pass`, silently returning None when a
        # subclass forgot to override; it also named this parameter
        # `positions` while the docstring (and every subclass) says `features`.
        raise NotImplementedError


class SuperTokenFuserAttn(SuperTokenFuser):
    """Fuses super tokens with standard ``nn.MultiheadAttention``.

    Tokens from all batch elements arrive packed along dim 0; they are
    padded into a dense (B, maxlen, C) batch for attention and unpacked
    back to the packed N x C layout afterwards.
    """

    def __init__(
        self,
        d_embed,
        n_dim,
        num_heads,
        dropout,
        pe_cfg = None,
        with_pe = False,
        residual = True,
        norm_loc = "pre",
        with_ffn = False,
        **kwargs,
    ):
        super().__init__(d_embed, dropout, with_ffn, norm_loc)
        # Fail fast at construction: with_pe without pe_cfg previously left
        # self.pe unset and crashed with an AttributeError inside forward().
        if with_pe and pe_cfg is None:
            raise ValueError("with_pe=True requires pe_cfg to be provided")
        self.residual = residual
        self.norm_loc = norm_loc
        self.with_pe = with_pe
        self.attn = nn.MultiheadAttention(d_embed, num_heads, dropout, batch_first=True)
        if pe_cfg is not None and with_pe:
            self.pe = SIRENEmbed(d_embed, n_dim, d_embed, **pe_cfg)
        if residual:
            self.norm = nn.BatchNorm1d(d_embed)
        else:
            # NOTE(review): forcing norm_loc to "none" here also disables the
            # FFN in process_ffn() even when with_ffn=True — confirm intended.
            self.norm_loc = "none"

    def forward(self, features: torch.Tensor, info: dict):
        """Fuse packed super tokens via multi-head self-attention.

        Args:
            features (torch.Tensor): packed super-token features, N x C.
            info (dict): must contain
                "st_pos" (torch.Tensor): N x D token positions,
                "num_sts" (list[int]): tokens per batch element (sums to N).

        Returns:
            tuple: (new features N x C, {"attn": attention weights}).
        """
        # extract info
        assert all(k in info for k in ["st_pos", "num_sts"])
        positions : torch.Tensor = info["st_pos"]   # N x D
        num_sts : list = info["num_sts"]

        # constants
        N, C = features.shape
        B = len(num_sts)
        maxlen = max(num_sts)
        device = features.device

        residual = features
        if self.norm_loc == "pre":
            features = self.norm(features)

        if self.with_pe:
            features_with_pe = features + self.pe(positions)

        # Pad the packed tokens into a dense (B, maxlen, C) batch and mark
        # padding positions so attention ignores them.
        token = torch.zeros(B, maxlen, C, device=device)
        token_with_pe = torch.zeros(B, maxlen, C, device=device) if self.with_pe else None
        pad_mask = torch.zeros(B, maxlen, dtype=torch.bool, device=device)
        start = 0
        for b, n in enumerate(num_sts):
            end = start + n
            token[b, :n] = features[start:end]
            if self.with_pe:
                token_with_pe[b, :n] = features_with_pe[start:end]
            pad_mask[b, n:] = True
            start = end
        if self.with_pe:
            # PE conditions the attention pattern (q/k) but stays out of the
            # values so the output mixes content features only.
            q = k = token_with_pe
            v = token
        else:
            q = k = v = token

        # Workaround for an MHA fast-path bug before torch 2.0.1: drop the
        # batch dim to force the slow path when evaluating a single sample.
        if not self.training and q.shape[0] == 1:
            q, k, v = q[0], k[0], v[0]
            pad_mask = None if pad_mask is None else pad_mask[0]
        token, attn = self.attn(q, k, v, key_padding_mask=pad_mask)
        # Undo the workaround: restore the batch dimension.
        if not self.training and q.dim() == 2:
            token = token.unsqueeze(0)

        # Unpad back to the packed N x C layout.
        out = []
        for b, n in enumerate(num_sts):
            out.append(token[b, :n])
        out = torch.cat(out, dim=0)

        if self.residual:
            out = out + residual
        if self.norm_loc == "post":
            out = self.norm(out)

        out = self.process_ffn(out)

        return out, { "attn": attn }
            

class SuperTokenFuserNormAttn(SuperTokenFuser):
    """Attention fuser with externally normalized q/k/v/out projections.

    Unlike :class:`SuperTokenFuserAttn`, the projections live outside the
    attention op (``MultiHeadAttentionNoParam``) so each Linear can be
    followed by its own BatchNorm1d.
    """

    def __init__(
        self,
        d_embed,
        n_dim,
        num_heads,
        dropout,
        pe_cfg = None,
        with_pe = False,
        residual = True,
        norm_loc = "pre",
        with_ffn = False,
        **kwargs,
    ):
        super().__init__(d_embed, dropout, with_ffn, norm_loc)
        # Fail fast at construction: with_pe without pe_cfg previously left
        # self.pe unset and crashed with an AttributeError inside forward().
        if with_pe and pe_cfg is None:
            raise ValueError("with_pe=True requires pe_cfg to be provided")
        self.residual = residual
        self.norm_loc = norm_loc
        self.with_pe = with_pe
        self.attn = MultiHeadAttentionNoParam(d_embed, num_heads, dropout)
        self.proj_q = nn.Sequential(nn.Linear(d_embed, d_embed, bias=False), nn.BatchNorm1d(d_embed))
        self.proj_k = nn.Sequential(nn.Linear(d_embed, d_embed, bias=False), nn.BatchNorm1d(d_embed))
        self.proj_v = nn.Sequential(nn.Linear(d_embed, d_embed, bias=False), nn.BatchNorm1d(d_embed))
        self.proj_o = nn.Sequential(nn.Linear(d_embed, d_embed, bias=False), nn.BatchNorm1d(d_embed))
        if pe_cfg is not None and with_pe:
            self.pe = SIRENEmbed(d_embed, n_dim, d_embed, **pe_cfg)
        if residual:
            self.norm = nn.BatchNorm1d(d_embed)
        else:
            # NOTE(review): forcing norm_loc to "none" here also disables the
            # FFN in process_ffn() even when with_ffn=True — confirm intended.
            self.norm_loc = "none"

    def forward(self, features: torch.Tensor, info: dict):
        """Fuse packed super tokens via projection-free attention.

        Args:
            features (torch.Tensor): packed super-token features, N x C.
            info (dict): must contain "st_pos" (N x D positions) and
                "num_sts" (list[int], per-batch token counts summing to N).

        Returns:
            tuple: (fused features N x C, {"attn": attention weights}).
        """
        # extract info
        assert all(k in info for k in ["st_pos", "num_sts"])
        positions : torch.Tensor = info["st_pos"]   # N x D
        num_sts : list = info["num_sts"]

        # constants
        N, C = features.shape
        B = len(num_sts)
        maxlen = max(num_sts)
        device = features.device

        residual = features
        if self.norm_loc == "pre":
            features = self.norm(features)

        # In-projections: PE conditions q/k only, so the attention pattern
        # is position-aware while the values stay content-only.
        if self.with_pe:
            features_with_pe = features + self.pe(positions)
            q_raw = self.proj_q(features_with_pe)
            k_raw = self.proj_k(features_with_pe)
        else:
            q_raw = self.proj_q(features)
            k_raw = self.proj_k(features)
        v_raw = self.proj_v(features)

        # Pad packed tokens into dense (B, maxlen, C) batches plus pad mask.
        q = torch.zeros(B, maxlen, C, device=device)
        k = torch.zeros(B, maxlen, C, device=device)
        v = torch.zeros(B, maxlen, C, device=device)
        pad_mask = torch.zeros(B, maxlen, dtype=torch.bool, device=device)
        start = 0
        for b, n in enumerate(num_sts):
            end = start + n
            q[b, :n] = q_raw[start:end]
            k[b, :n] = k_raw[start:end]
            v[b, :n] = v_raw[start:end]
            pad_mask[b, n:] = True
            start = end

        # custom attention (projections were removed from the op itself)
        tokens, attn = self.attn(q, k, v, kv_mask=pad_mask)

        # Unpad back to packed N x C layout, then out-project.
        out = []
        for b, n in enumerate(num_sts):
            out.append(tokens[b, :n])
        out = torch.cat(out, dim=0)
        out = self.proj_o(out)

        if self.residual:
            out = out + residual
        if self.norm_loc == "post":
            out = self.norm(out)

        out = self.process_ffn(out)

        return out, { "attn": attn }


class SuperTokenFuserSE(SuperTokenFuser):
    """Squeeze-and-excitation style fuser.

    Pools each batch element's super tokens into one descriptor, derives
    per-channel gates from it, and rescales the tokens with those gates
    around a residual connection.
    """

    def __init__(
        self,
        d_embed,
        dropout,
        pool_mode = "mean",
        norm_loc = "pre",
        with_ffn = False,
        **kwargs,
    ):
        super().__init__(d_embed, dropout, with_ffn, norm_loc)
        assert norm_loc in ["pre", "post"]
        assert pool_mode in ["mean", "min", "max"]
        self.norm_loc = norm_loc
        self.pool_mode = pool_mode
        self.norm = nn.LayerNorm(d_embed)
        self.base_net = nn.Identity()
        # Gate generator: C -> C, squashed to (0, 1) by the final Sigmoid.
        self.se_net = nn.Sequential(
            nn.Linear(d_embed, d_embed, bias=False),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(d_embed, d_embed, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, features: torch.Tensor, info: dict):
        """Fuse packed super tokens with channel-wise SE gating.

        Args:
            features (torch.Tensor): packed super-token features, N x C.
            info (dict): must contain "st_pos" and "num_sts" (list[int] of
                per-batch token counts summing to N).

        Returns:
            tuple: (fused features N x C, {}).
        """
        assert all(key in info for key in ["st_pos", "num_sts"])
        counts : list = info["num_sts"]

        normed = self.norm(features) if self.norm_loc == "pre" else features
        base = self.base_net(normed)

        # squeeze: pool each batch element's tokens into a single C-vector
        groups = torch.split(base, counts, dim=0)
        if self.pool_mode == "mean":
            pooled = torch.stack([g.mean(dim=0) for g in groups])
        else:
            reducer = getattr(torch, self.pool_mode)
            pooled = torch.stack([reducer(g, dim=0).values for g in groups])

        # per-batch channel gates
        gates = self.se_net(pooled)

        # excitation: rescale every token by its batch element's gates
        excited = torch.cat(
            [g * gates[b] for b, g in enumerate(groups)], dim=0
        )

        fused = features + excited
        if self.norm_loc == "post":
            fused = self.norm(fused)

        return self.process_ffn(fused), {}


class RepConvBlock(nn.Module):
    """Multi-branch conv block with a pre-norm residual pointwise FFN.

    Runs several parallel conv+BN paths with different kernel sizes, sums
    them, and feeds the sum through a 1x1-conv FFN with a residual skip.

    Args:
        d_embed (int): channel count (input == output).
        n_dim (int): spatial dimensionality; selects ConvNd / BatchNormNd.
        kernel_sizes (Sequence[int | Sequence[int]]): one entry per branch;
            an int is broadcast to all n_dim axes. Kernels are expected to
            be odd so the k // 2 padding preserves the spatial size.
        depthwise (bool): if True, branch convs are depthwise
            (groups == d_embed); otherwise full convolutions.
        dropout (float): dropout rate inside the FFN.
    """

    def __init__(
        self,
        d_embed,
        n_dim,
        kernel_sizes = (3, 5, 7),   # tuple: avoids the mutable-default pitfall
        depthwise = True,
        dropout = 0.0,
    ):
        super().__init__()
        num_groups = d_embed if depthwise else 1
        conv = getattr(nn, f"Conv{n_dim}d")
        norm = getattr(nn, f"BatchNorm{n_dim}d")
        self.paths = nn.ModuleList()
        for k in kernel_sizes:
            if isinstance(k, int):
                k = [k] * n_dim
            p = [k_ // 2 for k_ in k]  # "same" padding for odd kernels
            self.paths.append(nn.Sequential(
                conv(d_embed, d_embed, k, 1, p, groups=num_groups),
                norm(d_embed)
            ))
        # (removed an unused, always-empty `self.norms` ModuleList)
        self.norm = norm(d_embed)
        self.ffn = nn.Sequential(
            nn.GELU(),
            nn.Dropout(dropout),
            conv(d_embed, d_embed, 1),
            nn.GELU(),
            norm(d_embed),
        )

    def forward(self, x):
        """x: (B, d_embed, *spatial) -> same shape."""
        residual = x
        x = self.norm(x)  # pre-norm
        x = sum(path(x) for path in self.paths)
        x = residual + self.ffn(x)
        return x

    
class SuperTokenFuserConv(SuperTokenFuser):
    """Fuses super tokens by densifying them into a sparse voxel grid and
    running dense multi-kernel conv blocks over it.
    """

    def __init__(
        self,
        d_embed,
        n_dim,
        dropout,
        num_blocks=2,
        conv_cfg=None,
        norm_loc = "pre",
        with_ffn = False,
        **kwargs,
    ):
        # Bug fix: super().__init__() was called without the required
        # (d_embed, dropout, with_ffn, norm_loc) arguments, so construction
        # raised TypeError and self.with_ffn was never set for process_ffn().
        super().__init__(d_embed, dropout, with_ffn, norm_loc)
        if conv_cfg is None:
            # default moved out of the signature (mutable-default pitfall)
            conv_cfg = dict(kernel_sizes=[3, 5, 7], depthwise=True)
        self.norm_loc = norm_loc
        if norm_loc in ["pre", "post"]:
            self.norm = nn.BatchNorm1d(d_embed)
        self.blocks = nn.ModuleList()
        for b in range(num_blocks):
            self.blocks.append(RepConvBlock(d_embed, n_dim, dropout=dropout, **conv_cfg))

    def forward(self, features: torch.Tensor, info: dict):
        """Fuse packed super tokens via dense convolutions.

        Args:
            features (torch.Tensor): packed super-token features, N x C.
            info (dict): must contain
                "num_sts" (list[int]): tokens per batch element,
                "st_inds": per-batch integer voxel coordinates for spconv,
                "st_ids": per-batch flat indices into the flattened grid
                    (assumes same X*Y*Z ordering as .dense() — TODO confirm),
                "grid_size": dense spatial shape of the voxel grid.

        Returns:
            tuple: (fused features N x C, {}).
        """
        assert all(k in info for k in ["num_sts", "st_ids", "st_inds", "grid_size"])
        st_ids = info["st_ids"]
        st_inds = info["st_inds"]
        grid_size = info["grid_size"]
        batch_num_st = info["num_sts"]
        B = len(st_ids)

        residual = features
        if self.norm_loc == "pre":
            features = self.norm(features)

        # Build one sparse tensor over the whole batch: prepend the batch
        # index to each voxel coordinate as spconv expects.
        voxel_inds = []
        voxel_feats = []
        st_start = 0
        for b in range(B):
            st_end = st_start + batch_num_st[b]
            voxel_inds.append(
                torch.cat(
                    [
                        torch.ones_like(st_inds[b][..., :1]) * b,
                        st_inds[b]
                    ], dim=-1
                )
            )
            voxel_feats.append(features[st_start:st_end])
            st_start = st_end
        voxel_inds = torch.cat(voxel_inds, dim=0)
        voxel_feats = torch.cat(voxel_feats, dim=0)
        sp_feats = SparseConvTensor(voxel_feats, voxel_inds, grid_size, B)
        feats = sp_feats.dense()

        # Process the densified grid through the conv blocks.
        for block in self.blocks:
            feats = block(feats)

        # Gather back to the packed N x C layout via the flat spatial ids.
        feats = feats.view(*feats.shape[:2], -1).permute(0, 2, 1) # B x X * Y * Z x C
        features = torch.zeros_like(features)
        st_start = 0
        for b, n in enumerate(batch_num_st):
            st_end = st_start + n
            features[st_start:st_end] = feats[b, st_ids[b]]
            st_start = st_end

        features = residual + features
        if self.norm_loc == "post":
            features = self.norm(features)

        features = self.process_ffn(features)

        return features, {}
        

def get_super_token_fuser(cfg) -> "SuperTokenFuser":
    """Factory: build a super-token fuser from a config dict.

    Args:
        cfg (dict): must contain "mode" ("attn", "norm attn", "se" or
            "conv"); all keys (including "mode") are forwarded to the
            constructor, which accepts extras via **kwargs.

    Returns:
        SuperTokenFuser: the constructed fuser.

    Raises:
        NotImplementedError: for the not-yet-wired "conv" mode or an
            unknown mode.
    """
    mode = cfg["mode"]
    if mode == "attn":
        return SuperTokenFuserAttn(**cfg)
    elif mode == "norm attn":
        return SuperTokenFuserNormAttn(**cfg)
    elif mode == "se":
        return SuperTokenFuserSE(**cfg)
    elif mode == "conv":
        # SuperTokenFuserConv exists but is not wired up yet; fail loudly.
        # (Removed the unreachable `return` that followed this raise.)
        raise NotImplementedError("Conv super token fuser is not implemented yet")
    else:
        raise NotImplementedError(f"Unknown super token fuser mode {mode}")