# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor
from torch.nn import Conv2d
from mmdet.models.detectors.base import ForwardResults
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
from mmdet.models.layers.transformer import DetrTransformerEncoderLayer
from mmdet.models.layers import SinePositionalEncoding

@MODELS.register_module()
class Slot_detector(SingleStageDetector):
    """Single-stage detector with optional transformer refinement of
    backbone features.

    When ``with_attn`` is True, the first two backbone feature maps are
    reduced to ``attn_dims`` channels with 1x1 convolutions, combined with a
    sine positional encoding, and passed through one DETR transformer encoder
    layer each before the neck.

    Args:
        backbone (ConfigType): Config dict for the backbone.
        neck (ConfigType): Config dict for the neck.
        bbox_head (ConfigType): Config dict for the bounding-box head.
        with_attn (bool): Whether to apply the attention refinement.
            Defaults to False.
        bev_img_shape (tuple): Expected input image shape (H, W).
            Defaults to (512, 512).
        attn_dims (int): Embedding dimension used by the attention layers.
            Must be even (it is split between x/y positional encodings).
            Defaults to 256.
        backbone_downsample_scale (tuple): Downsample factors of the two
            refined backbone stages. Defaults to (16, 32).
        backbone_out_dims (tuple): Channel counts of the two refined
            backbone stages. Defaults to (1024, 2048).
        train_cfg (OptConfigType): Training config. Defaults to None.
        test_cfg (OptConfigType): Testing config. Defaults to None.
        data_preprocessor (OptConfigType): Data preprocessor config.
            Defaults to None.
        init_cfg (OptMultiConfig): Weight-initialization config.
            Defaults to None.
    """

    def __init__(self,
                 backbone: ConfigType,
                 neck: ConfigType,
                 bbox_head: ConfigType,
                 with_attn: bool = False,
                 bev_img_shape: tuple = (512, 512),
                 attn_dims: int = 256,
                 backbone_downsample_scale: tuple = (16, 32),
                 backbone_out_dims: tuple = (1024, 2048),
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 data_preprocessor: OptConfigType = None,
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            data_preprocessor=data_preprocessor,
            init_cfg=init_cfg)
        self.with_attn = with_attn

        if self.with_attn:
            self.bev_img_shape = bev_img_shape
            self.attn_dims = attn_dims
            self.backbone_downsample_scale = backbone_downsample_scale
            self.backbone_out_dims = backbone_out_dims
            # num_feats must be an int: attn_dims is split evenly between the
            # x and y sine encodings (was ``attn_dims / 2`` -> float).
            self.pos_encoding = SinePositionalEncoding(
                num_feats=self.attn_dims // 2, normalize=True)
            # Registering the layers in ModuleLists makes them part of the
            # model's state dict and lets ``model.to(device)`` move them.
            # NOTE(review): the explicit CUDA placement is kept for parity
            # with the original code, but it breaks CPU-only runs and is
            # redundant once the runner moves the whole model — consider
            # dropping ``.cuda()`` / ``device='cuda'``.
            self.encoder = torch.nn.ModuleList()
            self.feature_reduce = torch.nn.ModuleList()
            for i in range(2):
                self.encoder.append(DetrTransformerEncoderLayer().cuda())
                self.feature_reduce.append(
                    Conv2d(self.backbone_out_dims[i], self.attn_dims, 1,
                           device='cuda'))

    def _forward(self, batch_inputs: Tensor,
                 data_samples: OptSampleList = None) -> ForwardResults:
        """Network forward process without post-processing.

        Args:
            batch_inputs (Tensor): Input images of shape (bs, C, H, W).
            data_samples (OptSampleList): Unused; kept for interface
                compatibility with the base detector.

        Returns:
            Raw outputs of the bbox head.
        """
        feature = self.extract_feat(batch_inputs)
        outs = self.bbox_head.forward(feature)
        return outs

    def extract_feat(self, batch_inputs: Tensor):
        """Extract (and optionally attention-refine) multi-level features.

        Args:
            batch_inputs (Tensor): Input images of shape (bs, C, H, W).

        Returns:
            tuple[Tensor]: Multi-level feature maps.
        """
        x = self.backbone(batch_inputs)
        if self.with_attn:
            x = tuple(self.attn_feature_process(x))
        if self.with_neck:
            x = self.neck(x)
        # Bug fix: the original fell off the end (returned None) whenever
        # ``self.with_neck`` was False; always return the features.
        return x

    def attn_feature_process(self, batch_inputs):
        """Refine the first two feature levels with transformer encoders.

        Args:
            batch_inputs (tuple[Tensor]): Backbone feature maps; the first
                two levels must have ``backbone_out_dims`` channels.

        Returns:
            list[Tensor]: Refined feature maps, each with ``attn_dims``
            channels and unchanged spatial size.
        """
        attn_outs = []
        for i in range(2):
            bs, _, h, w = batch_inputs[i].shape
            # mmdet's mask convention is 1 = padded pixel. No padding is
            # applied here, so both masks must be all zeros. (Bug fix: the
            # original used ``new_ones``, which marks everything as padding
            # and collapses the sine positional encoding to all zeros.)
            mask = batch_inputs[i].new_zeros([bs, h, w])
            padding_mask = batch_inputs[i].new_zeros([bs, h * w])
            pos_embed = self.pos_encoding(mask)
            # (bs, C, H, W) -> (bs, H*W, C) token layout for the encoder.
            pos_embed = pos_embed.view(bs, self.attn_dims, -1).permute(0, 2, 1)
            query = self.feature_reduce[i](batch_inputs[i])
            query = query.view(bs, self.attn_dims, -1).permute(0, 2, 1)
            attn_out = self.encoder[i](query, pos_embed, padding_mask)
            # Back to (bs, C, H, W) for the neck.
            attn_out = attn_out.view(bs, h, w, -1).permute(0, 3, 1, 2)
            attn_outs.append(attn_out.contiguous())
        return attn_outs


