# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32

from mmdet.models.builder import ROI_EXTRACTORS
from mmdet.models.roi_heads.roi_extractors.base_roi_extractor import BaseRoIExtractor


@ROI_EXTRACTORS.register_module()
class TransformerRoiExtractor(BaseRoIExtractor):
    """Extract RoI features from every FPN level and adaptively fuse them
    with an attention (transformer) module.

    Args:
        attention_config (dict | None): Plugin config passed to
            ``mmcv.cnn.bricks.build_plugin_layer`` to build the attention
            module. If None, the stacked per-level RoI features are
            returned without fusion. Default: None.
        query_mode (str): How the attention query is built from the
            per-level RoI features. Only ``'max'`` (element-wise max over
            levels) is supported. Default: ``'max'``.
        kwargs (keyword arguments): Arguments that are the same
            as :class:`BaseRoIExtractor`.
    """

    def __init__(self,
                 attention_config=None,
                 query_mode="max",
                 **kwargs):
        super(TransformerRoiExtractor, self).__init__(**kwargs)
        # raise instead of assert: input validation must survive `python -O`
        if query_mode not in ("max",):
            raise ValueError(
                "query mode must in ['max'], but with query_mode = {}".format(
                    query_mode))
        self.query_mode = query_mode
        self.with_attention = attention_config is not None
        if self.with_attention:
            # build_plugin_layer returns (name, module); keep the module only
            self.attention_module = build_plugin_layer(
                attention_config, '_roi_attention_module')[1]

    @force_fp32(apply_to=('feats', ), out_fp16=True)
    def forward(self, feats, rois, roi_scale_factor=None):
        """Extract RoI features from all levels and fuse them.

        Args:
            feats (tuple[Tensor]): Feature maps from the FPN levels.
            rois (Tensor): RoIs with shape (n, 5); first column is the
                batch index, the rest the box coordinates.
            roi_scale_factor (float | None): Optional factor used to
                rescale the RoIs before extraction.

        Returns:
            Tensor: Fused RoI features. With attention enabled the
            attention module is expected to produce
            (num_rois, channels, roi_h, roi_w); without attention the
            stacked per-level features of shape
            (num_rois, num_levels, channels, roi_h, roi_w) are returned.
        """
        if len(feats) == 1:
            return self.roi_layers[0](feats[0], rois)

        out_size = self.roi_layers[0].output_size
        num_levels = len(feats)

        # sometimes rois is an empty tensor: return correctly-shaped zeros
        # without extracting anything
        if rois.size(0) == 0:
            return feats[0].new_zeros(
                rois.size(0), self.out_channels, *out_size)

        if roi_scale_factor is not None:
            rois = self.roi_rescale(rois, roi_scale_factor)

        # extract RoI features from each pyramid level independently
        roi_feats_list = [
            self.roi_layers[i](feats[i], rois) for i in range(num_levels)
        ]
        # stack -> [num_levels, num_rois, c, h, w], then move rois first:
        # [num_rois, num_levels, c, h, w]
        roi_feats = torch.stack(roi_feats_list).permute(1, 0, 2, 3, 4)

        if self.with_attention:
            if self.query_mode == "max":
                # element-wise max over levels as the attention query:
                # [num_rois, 1, c, h, w]
                query_feature = torch.max(roi_feats, dim=1, keepdim=True)[0]
            roi_feats = self.attention_module(roi_feats, query_feature)
        return roi_feats
