# Modified from ASFF - Adaptive Spatial Feature Fusion
# https://github.com/ruinmessi/ASFF
# paper: Learning Spatial Fusion for Single-Shot Object Detection 
# By Songtao Liu, Di Huang, Yunhong Wang
# [https://arxiv.org/abs/1911.09516]


import torch
import torch.nn as nn
import torch.nn.functional as F

from typing import List, Tuple, Optional, Union
from collections.abc import Iterable

from .cbam import ConvolutionBatchnormActivationModule as CBAM

__all__ = ['AdaptiveSpatialFeatureFusion']

class AdaptiveSpatialFeatureFusion(nn.Module):
    '''
        re-written Adaptive Spatial Feature Fusion (ASFF) module

        Fuses three pyramid feature maps into a single map at the spatial
        resolution and channel width of the selected `level`, using learned
        per-pixel softmax fusion weights.
    '''
    def __init__(self, level: int, level_dims: List[int],
                 weight_channels: int = 16,
                 interpolate: str = 'nearest',
                 align_corners: bool = False,
                 scale: Union[Iterable, float, int, None] = .5,
                 device: torch.device = 'cpu',
                 debug: bool = False, **kwargs) -> None:
        '''Args:
            level:           int          - current ASFF level (which input map fixes the output size)
            level_dims:      List[int]    - number of channels on each feature map to fuse
            weight_channels: int          - number of channels reserved for calculating each weight map
            interpolate:     str          - interpolate mode of F.interpolate ('nearest' or 'bilinear')
            align_corners:   bool         - only honoured when interpolate == 'bilinear'
            scale:           float        - scale factor between each adjacent feature maps [float]
                             list[float]  - scale factors between each feature map and current level feature map [list]
                             None         - all feature maps already share one spatial size
            device:          torch.device - device the submodules are placed on
            debug:           bool         - if True, forward() additionally returns the fused weight map
        Raises:
            ValueError:          bad 'level', bad 'level_dims' length, or bad scale-list length
            TypeError:           unsupported 'scale' type
            NotImplementedError: unsupported 'interpolate' mode
        '''
        super(AdaptiveSpatialFeatureFusion, self).__init__()
        self.__supported_levels = 3
        # explicit exception instead of `assert`: asserts vanish under `python -O`
        if len(level_dims) != self.__supported_levels:
            raise ValueError(f"argument 'level_dims' should have length {self.__supported_levels}, "
                             f"but got {len(level_dims)} instead")

        if level not in range(self.__supported_levels):
            raise ValueError(f"argument 'level' should be in {list(range(self.__supported_levels))}, "
                             f"but got {level} instead")
        self.level: int = level
        self._debug: bool = debug
        # spatial scale of each level relative to the current one (factor at `level` is 1.)
        self.scale_factors: List[float] = self._build_scale_factors(scale, level)
        self.device: torch.device = device
        if interpolate == 'nearest' or interpolate == 'bilinear':
            self.interpolate: str = interpolate
        else:
            raise NotImplementedError(f"unsupported interpolate method: "
                                      f"this module only supports 'nearest' or 'bilinear' methods so far, "
                                      f"but got {interpolate} instead")
        # F.interpolate only accepts align_corners for 'bilinear'; pass None otherwise
        self.align_corners: Optional[bool] = align_corners if interpolate == 'bilinear' else None

        # resizer[i]: projects level-i channels onto level `level`'s channel count
        # weighter[i]: shrinks the resized map to `weight_channels` for weight prediction
        resizer: list = []
        weighter: list = []
        for i in range(self.__supported_levels):
            if i < level: # downsample: stride-2 conv halves the spatial size
                resizer.append(CBAM(in_channels = level_dims[i],
                                    out_channels = level_dims[level],
                                    kernel_size = 3,
                                    stride = 2, **kwargs).to(device))
            elif i > level: # upsample: 1x1 conv here, F.interpolate at forward time
                resizer.append(CBAM(in_channels = level_dims[i],
                                    out_channels = level_dims[level],
                                    kernel_size = 1,
                                    stride = 1, **kwargs).to(device))
            else: # the current level needs no resizing
                resizer.append(None)
            weighter.append(CBAM(in_channels = level_dims[level],
                                 out_channels = weight_channels,
                                 kernel_size = 3,
                                 stride = 1, **kwargs).to(device))
        self.resizer = nn.ModuleList(resizer)
        self.weighter = nn.ModuleList(weighter)
        # 1x1 conv collapsing the stacked weight features into one logit per level
        self.collector = nn.Conv2d(in_channels = weight_channels * self.__supported_levels,
                                   out_channels = self.__supported_levels,
                                   kernel_size = 1,
                                   stride = 1).to(device)
        self.out_channels: int = level_dims[level]

        # initialize
        nn.init.xavier_uniform_(self.collector.weight)


    def _build_scale_factors(self, scale: Union[Iterable, float, int, None],
                             level: int) -> List[float]:
        '''Derive per-level spatial scale factors (relative to `level`) from `scale`.'''
        n = self.__supported_levels
        if scale is None: # no size change among feature map(s)
            return [1.] * n
        if isinstance(scale, (float, int)): # geometric progression around `level`
            factors = [1.] * n
            for i in range(level + 1, n):
                factors[i] = factors[i - 1] / scale
            for i in range(level - 1, -1, -1):
                factors[i] = factors[i + 1] * scale
            return factors
        if isinstance(scale, Iterable):
            # materialize first: a generic Iterable supports neither len() nor indexing,
            # and copying avoids aliasing the caller's list
            factors = list(scale)
            if len(factors) != n:
                raise ValueError(f"invalid scale factor(s): the length of scale list should be {n}, "
                                 f"but got {len(factors)} instead")
            if factors[level] == 1.:
                return factors
            # normalize so the current level's own factor is exactly 1.
            return [f / factors[level] for f in factors]
        raise TypeError("invalid scale factor(s): unsupported type")


    def forward(self, x: Tuple[torch.Tensor, ...]) -> torch.Tensor:
        '''Fuse the input maps into one (B, out_channels, H, W) tensor at the
        current level's resolution; when debug, also return the weight map.'''
        resized_maps: List[torch.Tensor] = [] # list[N]: (B, C, H, W)
        weights: List[torch.Tensor] = [] # list[N]: (B, c, H, W)

        for i in range(self.__supported_levels):
            if i < self.level: # downsample
                if i < self.level - 1: # bridge the remaining gap with max-pooling
                    # kernel 2**(level-1-i); F.max_pool2d's default stride equals the
                    # kernel size, so the stride-2 resizer completes the reduction
                    resized_map: torch.Tensor = self.resizer[i](F.max_pool2d(x[i], 2**(self.level-1-i)))
                else:
                    resized_map: torch.Tensor = self.resizer[i](x[i])
                resized_maps.append(resized_map)
            elif i > self.level: # upsample
                resized_map: torch.Tensor = self.resizer[i](x[i])
                resized_map = F.interpolate(resized_map,
                                            scale_factor = self.scale_factors[i],
                                            mode = self.interpolate,
                                            align_corners = self.align_corners,
                                            recompute_scale_factor = False)
                resized_maps.append(resized_map)
            else: # current level passes through untouched
                resized_maps.append(x[i])
            weight: torch.Tensor = self.weighter[i](resized_maps[i])
            weights.append(weight)

        stacked: torch.Tensor = torch.cat(weights, dim=1) # to tensor (B, c*N, H, W)
        weight: torch.Tensor = self.collector(stacked) # (B, N, H, W)
        weight = F.softmax(weight, dim=1) # per-pixel convex combination over levels

        fused_feat: torch.Tensor = torch.zeros_like(x[self.level])
        for i in range(self.__supported_levels):
            fused_feat += weight[:, i].unsqueeze(dim=1) * resized_maps[i]

        if self._debug:
            return fused_feat, weight
        else:
            return fused_feat