import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init,build_conv_layer, build_norm_layer
from torch.utils.checkpoint import checkpoint
import mmcv


from ..builder import NECKS

class AttentionModule(nn.Module):
    """Attention module for HRNet feature branches.

    Each output branch of an HRNet stage is passed through this module,
    which re-weights it with a channel-wise attention (an MLP over the
    concatenated global max/avg pooled descriptors, CBAM-style) followed
    by a spatial attention map (a small conv stack producing a
    single-channel mask).

    Args:
        num_branches (int): number of HRNet branches (parallel resolutions).
        list_num_channels (list[int]): input channels of each branch.
        list_channel_feat (list[int]): hidden width of the channel-attention
            MLP for each branch.
        list_spatial_feat_ch (list[int]): hidden channels of the
            spatial-attention conv stack for each branch.
        conv_cfg (dict | None): config dict to build conv layers.
        norm_cfg (dict): config dict to build norm layers.
    """

    def __init__(self,
                 num_branches,
                 list_num_channels,
                 list_channel_feat,
                 list_spatial_feat_ch,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super(AttentionModule, self).__init__()
        # Every per-branch setting list must provide one entry per branch.
        assert num_branches == len(list_num_channels)
        assert num_branches == len(list_channel_feat)
        assert num_branches == len(list_spatial_feat_ch)

        self.num_branches = num_branches
        self.list_num_channels = list_num_channels
        self.list_channel_feat = list_channel_feat
        self.list_spatial_feat_ch = list_spatial_feat_ch
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        self.branches = self.__make_branches(self.num_branches,
                                             self.list_num_channels,
                                             self.list_channel_feat,
                                             self.list_spatial_feat_ch)

    def __make_branches(self,
                        num_branches,
                        list_num_channels,
                        list_channel_feat,
                        list_spatial_feat_ch):
        """Build one (channel, spatial) attention pair per branch."""
        return nn.ModuleList([
            self.__make_one_branch(list_num_channels[i],
                                   list_channel_feat[i],
                                   list_spatial_feat_ch[i])
            for i in range(num_branches)
        ])

    def __make_one_branch(self,
                          num_channels,
                          channel_feat,
                          spatial_feat_ch):
        """Build the attention layers for a single branch.

        Returns:
            nn.ModuleList: ``[channel_attention, spatial_attention]``.
        """
        # Channel attention: MLP over the concatenated (max, avg) pooled
        # descriptors -> per-channel weight in (0, 1).
        channel_attention = nn.Sequential(
            nn.Linear(num_channels * 2, channel_feat),
            nn.LeakyReLU(),
            nn.Linear(channel_feat, num_channels),
            nn.Sigmoid())

        # Spatial attention: conv stack over the full feature map producing
        # a single-channel mask in (0, 1).
        # NOTE: this deviates from CBAM, which convolves only the 2-channel
        # (max, avg) channel-pooled projection; here the whole feature map
        # is used as input instead.
        spatial_attention = nn.Sequential(
            build_conv_layer(self.conv_cfg,
                             num_channels,
                             spatial_feat_ch,
                             kernel_size=3,
                             padding=1,
                             bias=True),
            build_norm_layer(self.norm_cfg, spatial_feat_ch)[1],
            nn.LeakyReLU(),
            build_conv_layer(self.conv_cfg,
                             spatial_feat_ch,
                             spatial_feat_ch,
                             kernel_size=3,
                             padding=1,
                             bias=True),
            build_norm_layer(self.norm_cfg, spatial_feat_ch)[1],
            nn.LeakyReLU(),
            build_conv_layer(self.conv_cfg,
                             spatial_feat_ch,
                             1,
                             kernel_size=3,
                             padding=1,
                             bias=True),
            nn.Sigmoid())

        return nn.ModuleList([channel_attention, spatial_attention])

    def forward(self, x):
        """Apply channel then spatial attention to every branch.

        Args:
            x (list[Tensor]): one ``(N, C_i, H_i, W_i)`` tensor per branch.

        Returns:
            list[Tensor]: attended features, same shapes as the inputs.
        """
        attention_feat = []

        for i in range(self.num_branches):
            branch_feat = x[i]

            # Global max/avg pooling over the full spatial extent.
            h, w = branch_feat.shape[2], branch_feat.shape[3]
            max_feat = F.max_pool2d(branch_feat, (h, w))
            avg_feat = F.avg_pool2d(branch_feat, (h, w))
            channel_feat = torch.cat(
                (max_feat.reshape(max_feat.shape[0], -1),
                 avg_feat.reshape(avg_feat.shape[0], -1)),
                dim=1)
            channel_attention = self.branches[i][0](channel_feat)

            # Scale by 2 so an uninformative attention (~0.5 after the
            # sigmoid) leaves the features roughly unchanged.
            branch_feat = branch_feat * channel_attention.reshape(
                (channel_attention.shape[0], channel_attention.shape[1],
                 1, 1)) * 2

            spatial_attention = self.branches[i][1](branch_feat)
            attention_feat.append(branch_feat * spatial_attention * 2)
        return attention_feat


@NECKS.register_module()
class AttentionHRFPN(nn.Module):
    """HRFPN (High Resolution Feature Pyramids) with optional attention.

    paper: `High-Resolution Representations for Labeling Pixels and Regions
    <https://arxiv.org/abs/1904.04514>`_.

    Args:
        in_channels (list): number of channels for each branch.
        out_channels (int): output channels of feature pyramids.
        num_outs (int): number of output stages.
        pooling_type (str): pooling for generating feature pyramids
            from {MAX, AVG}.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        stride (int): stride of 3x3 convolutional layers.
        attention_setting (dict | None): if given, an ``AttentionModule`` is
            applied to the pyramid outputs; must contain the key
            ``'list_spatial_feat_ch'`` (list[int] of length ``num_outs``).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs=5,
                 pooling_type='AVG',
                 conv_cfg=None,
                 norm_cfg=None,
                 with_cp=False,
                 stride=1,
                 attention_setting=None):
        super(AttentionHRFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.attention_setting = attention_setting

        # 1x1 conv fusing the concatenated multi-resolution features.
        self.reduction_conv = ConvModule(
            sum(in_channels),
            out_channels,
            kernel_size=1,
            conv_cfg=self.conv_cfg,
            act_cfg=None)

        # One 3x3 conv per output pyramid level.
        self.fpn_convs = nn.ModuleList(
            ConvModule(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                stride=stride,
                conv_cfg=self.conv_cfg,
                act_cfg=None) for _ in range(self.num_outs))

        self.pooling = F.max_pool2d if pooling_type == 'MAX' else F.avg_pool2d

        if self.attention_setting is not None:
            # Every pyramid level carries ``out_channels`` channels; 256 is
            # the fixed hidden width of the channel-attention MLP.
            self.attention = AttentionModule(
                self.num_outs,
                [self.out_channels] * self.num_outs,
                [256] * self.num_outs,
                self.attention_setting['list_spatial_feat_ch'])

    def init_weights(self):
        """Initialize the weights of module.

        NOTE(review): only ``nn.Conv2d`` layers are re-initialized; the
        ``nn.Linear`` layers of the optional attention module keep their
        PyTorch defaults — confirm this is intended.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (list[Tensor]): one feature map per HRNet branch,
                highest resolution first.

        Returns:
            tuple[Tensor]: ``num_outs`` pyramid levels.
        """
        assert len(inputs) == self.num_ins
        # Upsample every branch to the highest resolution and concatenate.
        outs = [inputs[0]]
        for i in range(1, self.num_ins):
            outs.append(
                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
        out = torch.cat(outs, dim=1)
        if out.requires_grad and self.with_cp:
            out = checkpoint(self.reduction_conv, out)
        else:
            out = self.reduction_conv(out)
        # Build the pyramid by repeatedly downsampling the fused map.
        outs = [out]
        for i in range(1, self.num_outs):
            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
        outputs = []

        for i in range(self.num_outs):
            if outs[i].requires_grad and self.with_cp:
                tmp_out = checkpoint(self.fpn_convs[i], outs[i])
            else:
                tmp_out = self.fpn_convs[i](outs[i])
            outputs.append(tmp_out)

        if self.attention_setting is not None:
            outputs = self.attention(outputs)
        return tuple(outputs)
