from typing import List, Optional, Tuple, Union
import warnings

import numpy as np
import torch
import torch.nn as nn

from mmcv.runner import BaseModule, force_fp32
from mmdet.models.builder import NECKS
from mmcv.utils import build_from_cfg
from mmcv.cnn.bricks.registry import (
    ATTENTION,
    PLUGIN_LAYERS,
    POSITIONAL_ENCODING,
    FEEDFORWARD_NETWORK,
    NORM_LAYERS,
)
from mmdet.models import (
    DETECTORS,
    BaseDetector,
    build_backbone,
    build_head,
    build_neck,
)
from ..blocks import DeformableFeatureAggregation as DFG

__all__ = ["DeformableCam2Bev"]


@NECKS.register_module()
class DeformableCam2Bev(BaseModule):
    """Deformable-attention neck lifting camera features into a BEV map.

    A bank of BEV instances/anchors is fetched from ``bev_bank``, refined
    through ``num_encoder`` stacked deformable-attention layers against the
    image ``feature_maps``, cached back into the bank for temporal modeling,
    and finally reshaped into a dense BEV feature map.

    Args:
        bev_bank (dict): Config for the BEV instance bank, built from the
            ``PLUGIN_LAYERS`` registry. Must expose ``get``, ``cache`` and
            ``bev_size``.
        anchor_encoder (dict): Config for the anchor positional encoder,
            built from the ``POSITIONAL_ENCODING`` registry.
        deformable_model (dict): Config of a single deformable-attention
            layer, built from the ``ATTENTION`` registry; instantiated
            ``num_encoder`` times (weights are NOT shared between layers).
        num_encoder (int): Number of stacked deformable layers. Default: 6.
        sampler (dict, optional): Currently unused; accepted only for
            config compatibility.
        init_cfg (dict, optional): Initialization config forwarded to
            :class:`BaseModule`.
        img_bev_encoder_neck (dict, optional): Currently unused; accepted
            only for config compatibility.
        img_bev_encoder_backbone (dict, optional): Currently unused;
            accepted only for config compatibility.
    """

    def __init__(
        self,
        bev_bank: dict,
        anchor_encoder: dict,
        deformable_model: dict,
        num_encoder: int = 6,
        sampler: Optional[dict] = None,
        init_cfg: Optional[dict] = None,
        img_bev_encoder_neck=None,
        img_bev_encoder_backbone=None,
        **kwargs,
    ):
        super(DeformableCam2Bev, self).__init__(init_cfg)
        self.num_encoder = num_encoder

        # =========== build modules ===========
        def build(cfg, registry):
            # Tolerate absent sub-configs: return None instead of raising.
            if cfg is None:
                return None
            return build_from_cfg(cfg, registry)

        self.bev_bank = build(bev_bank, PLUGIN_LAYERS)
        self.anchor_encoder = build(anchor_encoder, POSITIONAL_ENCODING)
        # Build each encoder layer independently so parameters are not shared.
        self.layers = nn.ModuleList(
            build(deformable_model, ATTENTION) for _ in range(num_encoder)
        )

    def init_weights(self):
        """Delegate weight initialization to submodules that support it.

        NOTE(review): this looks for the *singular* ``init_weight``, which
        intentionally differs from mmcv's ``init_weights`` convention —
        confirm against the block implementations before renaming.
        """
        for m in self.modules():
            if hasattr(m, "init_weight"):
                m.init_weight()

    def forward(
        self,
        feature_maps: Union[torch.Tensor, List],
        **metas,
    ):
        """Lift image ``feature_maps`` into a dense BEV feature map.

        Args:
            feature_maps (Tensor | list[Tensor]): Multi-scale image
                features; a lone tensor is wrapped into a one-element list.
            **metas: Per-sample meta information, forwarded to the BEV
                bank and every attention layer.

        Returns:
            list[Tensor]: One-element list holding the BEV feature of
            shape ``(B, C, *reversed(bev_size))`` — the final permute
            swaps the two spatial axes produced by ``bev_bank.bev_size``.
        """
        if isinstance(feature_maps, torch.Tensor):
            feature_maps = [feature_maps]
        batch_size = feature_maps[0].shape[0]

        # ========= get instance info ============
        (
            instance_feature,
            anchor,
            temp_instance_feature,  # unused here; temporal fusion lives in the bank
            temp_anchor,  # unused here
            time_interval,  # unused here
        ) = self.bev_bank.get(
            batch_size, metas, dn_metas=None,
        )
        anchor_embed = self.anchor_encoder(anchor)
        # Iteratively refine the BEV instances against the image features.
        for layer in self.layers:
            instance_feature = layer(
                instance_feature,
                anchor,
                anchor_embed,
                feature_maps,
                metas,
            )
        # Cache current instances so the bank can do temporal modeling
        # on the next frame.
        self.bev_bank.cache(
            instance_feature, anchor, metas,
        )
        # (B, N, C) -> (B, C, N) -> (B, C, *bev_size), then swap the two
        # spatial axes and make the tensor contiguous for downstream ops.
        bev_feature = instance_feature.permute(0, 2, 1).reshape(
            batch_size, -1, *self.bev_bank.bev_size
        )
        bev_feature = bev_feature.permute(0, 1, 3, 2).contiguous()
        return [bev_feature]