from typing import Dict, List, Optional, Tuple, Union

import math

import numpy as np

import torch
from torch import Tensor
from torch import nn as nn

from mmcv.cnn.bricks.transformer import FFN

from mmdet3d.models.layers import SparseBasicBlock, make_sparse_convmodule
from mmdet3d.registry import MODELS

from .universal_sparse_super_token_attention import AgentAttention
from .super_token_preprocessor import SuperTokenPreprocessor
from .utils import FourierEmbed

# NOTE: requires spconv 2.x
from spconv.pytorch import SparseConvTensor, SparseAvgPool3d, SparseAvgPool2d, SparseAvgPool1d, SparseSequential

def build_sparse_avg_pool(dim, *args, **kwargs):
    """Build a spconv average-pooling layer of the given dimensionality.

    Args:
        dim (int): Spatial dimensionality of the pooling op; must be 1, 2 or 3.
        *args: Positional arguments forwarded to the spconv pooling class.
        **kwargs: Keyword arguments forwarded to the spconv pooling class.

    Returns:
        The corresponding ``SparseAvgPool{dim}d`` instance.

    Raises:
        ValueError: If ``dim`` is not 1, 2 or 3.
    """
    # Guard-clause dispatch: fall through to an error for any unsupported dim.
    if dim == 3:
        return SparseAvgPool3d(*args, **kwargs)
    if dim == 2:
        return SparseAvgPool2d(*args, **kwargs)
    if dim == 1:
        return SparseAvgPool1d(*args, **kwargs)
    raise ValueError(f"Unsupported dim {dim} for sparse avg pooling")
    
    
class SparseSuperTokenEncoderBlock(nn.Module):
    """One encoder block: super-token attention + local sparse conv + FFN.

    The block batch-normalizes the input features, feeds them through a
    super-token :class:`AgentAttention` branch and a local submanifold-conv
    branch, sums both branch outputs with a residual connection, and
    finishes with a residual feed-forward network.

    Args:
        ch_base: Feature width of the incoming sparse tensor.
        ch_ffn: Hidden width of the feed-forward network.
        dim: Spatial dimensionality used to pick the submanifold conv type.
        pe_temperature: Positional-encoding temperature.
            NOTE(review): accepted but not used anywhere in this block —
            confirm whether it should be forwarded to the attention module.
        sta_cfg: Attention configuration; defaults to
            ``dict(num_heads=8, num_super_token_iters=1, dropout=0.1)``.
            NOTE(review): currently not forwarded to ``AgentAttention``
            (``num_heads``/``agent_size`` are hard-coded below) — confirm
            this is intended.
        block_index: Index of this block within its encoder stage.
        layer_idx: Stage index used to share a spconv ``indice_key`` across
            blocks of the same stage; ``-1`` disables sharing.
    """

    def __init__(
        self,
        ch_base: int,
        ch_ffn: int,
        dim: int = 3,
        pe_temperature: float = 0.1,
        sta_cfg: Optional[dict] = None,
        block_index: int = 0,
        layer_idx: int = -1,  # for indice_key
    ):
        super().__init__()
        # Build the default cfg per instance instead of using a mutable
        # default argument (which would be shared across all instances).
        if sta_cfg is None:
            sta_cfg = dict(num_heads=8, num_super_token_iters=1, dropout=0.1)
        self.block_index = block_index

        # Super-token attention branch (pre-norm).
        self.ln = nn.BatchNorm1d(ch_base)
        self.sta = AgentAttention(ch_base, num_heads=8, agent_size=35)

        # Local context branch. NOTE(review): the kernel/padding tuples are
        # fixed 3-element tuples, so this assumes dim == 3 even though
        # conv_type is parameterized on `dim` — confirm for dim != 3.
        self.local = SparseSequential(
            make_sparse_convmodule(
                ch_base, ch_base, (3, 3, 1),
                padding=(1, 1, 0),
                conv_type=f"SubMConv{dim}d",
                order=("conv",),
                indice_key=f"encoder_cpe_l{layer_idx}" if layer_idx >= 0 else None,
            ),
            nn.BatchNorm1d(ch_base),
            nn.GELU(),
            nn.Linear(ch_base, ch_base),
        )

        # Feed-forward network (pre-norm, applied residually in forward()).
        self.ffn = nn.Sequential(
            nn.BatchNorm1d(ch_base),
            nn.Linear(ch_base, ch_ffn),
            nn.GELU(),
            nn.Dropout(0.1),
            nn.Linear(ch_ffn, ch_base))

        # Debug bookkeeping, populated only when set_debug(True) is called.
        self.debug = False
        self.debug_states = {}

    def set_debug(self, value):
        """Enable/disable debug collection on this block and its attention."""
        self.debug = value
        self.debug_states = {}
        self.sta.set_debug(value)

    def get_debug_states(self):
        """Return the collected debug states, merged with the attention's."""
        self.debug_states.update(self.sta.get_debug_info())
        return self.debug_states

    def forward(self, x: SparseConvTensor, sta_metas: dict):
        """Run attention, local conv, and FFN on a sparse tensor.

        Args:
            x: Input sparse tensor with (N, ch_base) features.
            sta_metas: Preprocessor metadata; must contain the keys
                'linked_pairs' and 'super_token_src'.

        Returns:
            SparseConvTensor with the same indices and updated features.
        """
        residual = x.features

        x = x.replace_feature(self.ln(x.features))

        # Attention branch: normalized features plus the voxel coordinates
        # (batch column stripped) as positions.
        feat = x.features
        pos = x.indices[:, 1:].float().contiguous()
        feat = self.sta(feat,
                        pos,
                        batch_size=x.batch_size,
                        linked_pair=sta_metas['linked_pairs'],
                        H_indice=sta_metas['super_token_src'])

        # Sum the local-conv branch, the attention branch, and the residual.
        x = self.local(x)
        x = x.replace_feature(x.features + feat + residual)

        # Residual feed-forward network.
        x = x.replace_feature(x.features + self.ffn(x.features))

        return x
    
    
@MODELS.register_module()
class SparseSuperTokenEncoder(nn.Module):
    """Sparse voxel encoder built from stacked super-token attention stages.

    Each stage optionally downsamples the sparse tensor, runs a
    :class:`SuperTokenPreprocessor` to build attention metadata, and applies
    a sequence of :class:`SparseSuperTokenEncoderBlock` modules. A final
    sparse conv collapses the z axis so the output can be densified into a
    BEV feature map.

    Args:
        in_channels: Number of input voxel feature channels.
        out_channels: Number of channels produced by the output conv.
        encoder_channels: Per-stage channel widths. Defaults to ``[128]``.
        encoder_blocks: Per-stage block counts. Defaults to ``[8]``.
        downsample_strides: Per-stage stride (int or per-dim sequence).
            Defaults to ``[1]`` (no downsampling).
        ffn_ratio: FFN hidden width multiplier (hidden = ch * ffn_ratio).
        sparse_shape: Spatial shape of the input sparse tensor; its length
            sets the convolution dimensionality. Defaults to ``[1, 360, 360]``.
        layer_sta_cfg_base: Base attention cfg merged into every stage cfg.
        layer_sta_cfgs: Per-stage attention cfgs; a single dict is broadcast
            to all stages.
        layer_sampling_cfgs: Per-stage sampling cfgs for the preprocessors;
            a list of dicts is broadcast to all stages. Required in practice
            (each stage indexes into it).
        layer_association_cfgs: Per-stage association cfgs; a dict or a list
            of dicts is broadcast to all stages.
        return_middle_feats: If True, also return per-stage sparse features.
        return_sparse_output: If True, return the sparse tensor instead of a
            dense BEV map.
        z_index: Index of the z axis inside ``sparse_shape``
            (adapter for BEVFusion-style layouts).
        pe_temperature: Positional-encoding temperature forwarded to blocks.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        encoder_channels=None,
        encoder_blocks=None,
        downsample_strides=None,
        ffn_ratio: int = 4,
        sparse_shape=None,
        layer_sta_cfg_base: Optional[Dict] = None,
        layer_sta_cfgs: Union[Dict, List[Dict], None] = None,
        layer_sampling_cfgs: Optional[List[List[Dict]]] = None,
        layer_association_cfgs: Optional[List[List[Dict]]] = None,
        return_middle_feats: bool = False,
        return_sparse_output: bool = False,
        z_index: int = 0,  # NOTE: adapter for bevfusion
        pe_temperature: float = 0.1,
        *args,
        **kwargs,
    ) -> None:
        super().__init__()

        # Resolve per-call defaults (the original used mutable default
        # arguments, which are shared across all instances).
        encoder_channels = [128] if encoder_channels is None else encoder_channels
        encoder_blocks = [8] if encoder_blocks is None else encoder_blocks
        downsample_strides = [1] if downsample_strides is None else downsample_strides
        sparse_shape = [1, 360, 360] if sparse_shape is None else sparse_shape
        layer_sta_cfg_base = {} if layer_sta_cfg_base is None else layer_sta_cfg_base
        layer_sta_cfgs = [] if layer_sta_cfgs is None else layer_sta_cfgs
        layer_sampling_cfgs = [] if layer_sampling_cfgs is None else layer_sampling_cfgs
        layer_association_cfgs = [] if layer_association_cfgs is None else layer_association_cfgs

        # constants
        norm_cfg = dict(type="LN")
        dim = len(sparse_shape)
        conv_type_subm = f"SubMConv{dim}d"
        conv_type = f"SparseConv{dim}d"
        self.sparse_shape = sparse_shape
        self.return_middle_feats = return_middle_feats
        self.return_sparse_output = return_sparse_output
        self.z_index = z_index
        self.num_layers = len(encoder_blocks)

        # Broadcast single cfgs to all stages. The [0] probes are guarded so
        # an empty list no longer raises IndexError at this point (the
        # original did when the defaults were left in place).
        if isinstance(layer_sta_cfgs, dict):
            layer_sta_cfgs = [layer_sta_cfgs] * self.num_layers
        if layer_sampling_cfgs and isinstance(layer_sampling_cfgs[0], dict):
            layer_sampling_cfgs = [layer_sampling_cfgs] * self.num_layers
        if isinstance(layer_association_cfgs, dict) or (
                layer_association_cfgs and isinstance(layer_association_cfgs[0], dict)):
            layer_association_cfgs = [layer_association_cfgs] * self.num_layers

        # Input conv: submanifold conv lifting in_channels -> base width.
        base_channels = encoder_channels[0]
        self.in_conv = make_sparse_convmodule(
            in_channels, base_channels, 3,
            stride=1, padding=1,
            norm_cfg=norm_cfg, conv_type=conv_type_subm,
            order=('conv', 'norm'),
        )
        self.out_conv = None  # built after the stage loop (needs final z_size)

        # Build stages: (downsample, preprocessor, blocks) per stage.
        self.downsamples = nn.ModuleList()
        # NOTE(review): plain list — preprocessors are NOT registered as
        # submodules, so any parameters/buffers they hold would not follow
        # .to()/.cuda()/state_dict(); presumably they are parameter-free —
        # confirm before changing to nn.ModuleList (would alter state_dict keys).
        self.preprocessors = []
        self.layers = nn.ModuleList()
        last_ch = base_channels
        z_size = sparse_shape[z_index]
        layer_index = 0
        for ch, nblock, ds in zip(encoder_channels, encoder_blocks, downsample_strides):
            # Downsample module: identity when stride is 1 and width matches.
            if (ds == 1 or np.prod(ds) == 1) and ch == last_ch:
                self.downsamples.append(nn.Identity())
            else:
                if isinstance(ds, int):
                    ds = [ds] * dim
                self.downsamples.append(make_sparse_convmodule(
                    last_ch, ch, ds,
                    stride=ds, padding=0,
                    norm_cfg=norm_cfg, conv_type=conv_type
                ))
                last_ch = ch
                # Track the shrinking z extent for the output kernel size.
                z_size = z_size // ds[z_index]
            # Preprocessor producing super-token metadata for this stage.
            sampling_cfg = layer_sampling_cfgs[layer_index]
            association_cfg = layer_association_cfgs[layer_index]
            self.preprocessors.append(SuperTokenPreprocessor(sampling_cfg, association_cfg))
            # Attention blocks: base cfg overridden by the stage-specific cfg.
            layer_sta_cfg = layer_sta_cfg_base.copy()
            layer_sta_cfg.update(layer_sta_cfgs[layer_index])
            blocks = nn.ModuleList()
            for bid in range(nblock):
                blocks.append(SparseSuperTokenEncoderBlock(
                    ch, ch * ffn_ratio,
                    len(sparse_shape),
                    block_index=bid,
                    sta_cfg=layer_sta_cfg,
                    layer_idx=layer_index,
                    pe_temperature=pe_temperature
                ))
            self.layers.append(blocks)
            last_ch = ch
            layer_index += 1

        # Output conv: kernel spans the full (possibly downsampled) z extent
        # so the z axis collapses to size 1.
        out_conv_ksize = [1] * dim
        out_conv_ksize[z_index] = z_size
        self.out_conv = SparseSequential(
            nn.BatchNorm1d(encoder_channels[-1]),
            make_sparse_convmodule(
                encoder_channels[-1], out_channels, out_conv_ksize,
                stride=1, padding=0,
                norm_cfg=norm_cfg, conv_type=conv_type,
                order=('conv', 'norm'),
            ))

        # Debug bookkeeping, populated only when set_debug(True) is called.
        self.debug = False
        self.debug_states = {}

    def set_debug(self, value):
        """Enable/disable debug collection on all encoder blocks."""
        self.debug = value
        self.debug_states = {}
        for layer in self.layers:
            for block in layer:
                block.set_debug(value)

    def get_training_stat(self):
        """Collect per-block attention/norm statistics for logging."""
        stats = {}
        for lid, layer in enumerate(self.layers):
            for bid, block in enumerate(layer):
                block: SparseSuperTokenEncoderBlock
                stats[f"stat/l{lid}_b{bid}/attn_range"] = block.sta.stat_attn_range
                stats[f"stat/l{lid}_b{bid}/norm_in"] = block.sta.stat_input_norm
                stats[f"stat/l{lid}_b{bid}/norm_out"] = block.sta.stat_output_norm
        return stats

    def get_debug_states(self):
        """Gather per-layer lists of block debug states."""
        for i, layer in enumerate(self.layers):
            self.debug_states[f"layer{i}"] = []
            for block in layer:
                self.debug_states[f"layer{i}"].append(block.get_debug_states())
        return self.debug_states

    def forward(self,
                voxel_features: Tensor,
                coors: Tensor,
                batch_size: Tensor,
                test_mode: bool = False) -> Union[Tensor, Tuple]:
        """Encode sparse voxels into a BEV (or sparse) feature map.

        Args:
            voxel_features: (N, in_channels) voxel features.
            coors: (N, 1 + dim) coordinates; column 0 is the batch index.
            batch_size: Number of samples in the batch.
                NOTE(review): annotated Tensor upstream but consumed via
                ``range(batch_size)`` — it is effectively an int; confirm.
            test_mode: Unused here; kept for interface compatibility.

        Returns:
            The output feature map (dense Tensor unless
            ``return_sparse_output``); a ``(out, middle_feats)`` tuple when
            ``return_middle_feats`` is set.
        """
        # Build the input sparse tensor and lift to base width.
        coors = coors.int()
        input_sp_tensor = SparseConvTensor(voxel_features, coors,
                                           self.sparse_shape, batch_size)
        x = self.in_conv(input_sp_tensor)

        # Forward stages.
        feats = []
        for lid, (downsample, layer) in enumerate(zip(self.downsamples, self.layers)):
            # Downsample (identity for stride-1, same-width stages).
            x = downsample(x)
            # Per-sample voxel index lists for the preprocessor.
            batch_token_ids = [torch.nonzero(x.indices[:, 0] == b, as_tuple=True)[0] for b in range(batch_size)]
            metas = self.preprocessors[lid](x.indices[:, 1:], batch_token_ids)
            # Run the stage's blocks. NOTE(review): every block consumes
            # metas[0]; presumably the preprocessor returns a single-entry
            # (or shared-first-entry) meta list — confirm.
            for bid, block in enumerate(layer):
                x = block(x, metas[0])
            if self.return_middle_feats:
                feats.append(x)

        # Output conv collapses the z axis.
        out = self.out_conv(x)

        # Densify to a BEV map unless a sparse output was requested.
        if not self.return_sparse_output:
            out = out.dense()
            # +2 skips the (batch, channel) dims of the dense tensor.
            out = out.squeeze(self.z_index + 2)

        if self.return_middle_feats:
            return out, feats
        else:
            return out