import torch
import torch.nn as nn
from mmcv.cnn import ConvModule

from mmseg.ops import resize
from ..builder import HEADS
from .refine_decode_head import RefineBaseDecodeHead
from .aspp_head import ASPPModule



@HEADS.register_module()
class RefineASPPHead(RefineBaseDecodeHead):
    """Rethinking Atrous Convolution for Semantic Image Segmentation.

    This head is the implementation of `DeepLabV3
    <https://arxiv.org/abs/1706.05587>`_, adapted to additionally pass
    features through for a downstream refinement stage.

    Args:
        dilations (tuple[int]): Dilation rates for ASPP module.
            Default: (1, 6, 12, 18).
    """

    def __init__(self, dilations=(1, 6, 12, 18), **kwargs):
        super(RefineASPPHead, self).__init__(**kwargs)
        assert isinstance(dilations, (list, tuple))
        self.dilations = dilations
        # Image-level context branch: global average pool + 1x1 conv.
        self.image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                self.in_channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        # Parallel atrous-convolution branches, one per dilation rate.
        self.aspp_modules = ASPPModule(
            dilations,
            self.in_channels,
            self.channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Fuses the concatenated branch outputs; "+ 1" accounts for the
        # image-pool branch prepended in forward().
        self.bottleneck = ConvModule(
            (len(dilations) + 1) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (list[Tensor]): Multi-level backbone features.

        Returns:
            tuple: ``(output, inputs)`` where ``output`` is the segmentation
            logits produced by ``cls_seg`` and ``inputs`` are the untouched
            backbone features, passed through for the refinement stage.
        """
        x = self._transform_inputs(inputs)

        # Collects intermediate decoder feature maps: first the bottleneck
        # output (the feature map closest to cls_seg), then the logits.
        fm_middle = []
        # Image-pool branch, upsampled back to the spatial size of x.
        aspp_outs = [
            resize(
                self.image_pool(x),
                size=x.size()[2:],
                mode='bilinear',
                align_corners=self.align_corners)
        ]
        aspp_outs.extend(self.aspp_modules(x))
        aspp_outs = torch.cat(aspp_outs, dim=1)
        output = self.bottleneck(aspp_outs)
        fm_middle.append(output)
        output = self.cls_seg(output)
        fm_middle.append(output)

        # NOTE(review): fm_middle is built but not returned — an earlier
        # `return output, fm_middle` variant was abandoned in favour of
        # returning the raw backbone features. Confirm this is intended
        # before deleting the fm_middle bookkeeping.
        return output, inputs
