import torch
import torch.nn as nn
import torch.nn.functional as F
from maskrcnn_benchmark.modeling.fancy_module.attention import CBAM
from maskrcnn_benchmark.modeling.fancy_module.gcblock import ContextBlock


class GCDenseRelationDistill(nn.Module):
    """Dense relation distillation using a global-context non-local block.

    For every pyramid level, the query feature is projected to ``valdim``
    channels, resized to the (fixed) support resolution, related to each of
    the ``ncls`` per-class support maps through a shared ``ContextBlock``,
    and the per-class responses are summed.  The aggregate is resized back
    to the query resolution and, when ``dense_sum`` is set, batch-normalized
    and fused with the original feature through a 1x1 conv.

    Args:
        indim: channel count of incoming query/support features.
        keydim: kept for interface compatibility with sibling heads; unused.
        valdim: projection width for query and support values.
        dense_sum: when True, apply per-level BatchNorm and fuse the
            distilled map back into the original feature via ``combine``.
            NOTE: the BN/combine widths are hard-coded for indim=256 and
            valdim=128 (cat doubles valdim to 256; combine sees 256+256=512).
    """

    def __init__(self, indim, keydim, valdim=128, dense_sum=False):
        super(GCDenseRelationDistill, self).__init__()
        # Project support features down to valdim channels.
        self.support = nn.Conv2d(indim, valdim, kernel_size=(3, 3), padding=(1, 1), stride=1)
        self.sum = dense_sum
        # Relates one query sample to one class's support map.
        # inplanes follows valdim instead of a hard-coded 128 so the head
        # generalizes; the default (valdim=128) is unchanged.
        self.gcb = ContextBlock(inplanes=valdim, ratio=1.0 / 4, pooling_type='att')
        # One query-value projection per pyramid level.  forward() needs
        # these regardless of dense_sum, so they are created unconditionally
        # (placing them under `if self.sum` crashed when dense_sum=False).
        # add_module keeps the original parameter names value_q0..value_q4.
        for i in range(5):
            self.add_module('value_q%d' % i,
                            nn.Conv2d(indim, valdim, kernel_size=(3, 3), padding=(1, 1), stride=1))
        if self.sum:
            self.bnn0 = nn.BatchNorm2d(256)
            self.bnn1 = nn.BatchNorm2d(256)
            self.bnn2 = nn.BatchNorm2d(256)
            self.bnn3 = nn.BatchNorm2d(256)
            self.bnn4 = nn.BatchNorm2d(256)
            self.combine = nn.Conv2d(512, 256, kernel_size=1, padding=0, stride=1)

    def forward(self, features, attentions):
        """Distill support attention into each query feature level.

        Args:
            features: iterable of per-level query tensors, each shaped
                (bs, indim, H, W); typically 5 FPN levels of varying size.
            attentions: (ncls, indim, h, w) tensor of per-class support
                maps at a fixed resolution (e.g. 16x16), or a dict keyed
                0..ncls-1 of (indim, h, w) tensors.

        Returns:
            Tuple of per-level distilled features (channels 2*valdim, or
            indim when ``dense_sum`` fuses them back via ``combine``).
        """
        features = list(features)
        if isinstance(attentions, dict):
            # Stack dict entries (assumed keyed 0..len-1) into one tensor.
            attentions = torch.stack(
                [attentions[i] for i in range(len(attentions))]).cuda()
        output = []
        # Support resolution is fixed; query resolutions vary per level.
        h, w = attentions.shape[2:]
        ncls = attentions.shape[0]
        attentions = self.support(attentions)  # (ncls, valdim, h, w)

        for idx, feature in enumerate(features):
            bs = feature.shape[0]
            H, W = feature.shape[2:]
            # Per-level query projection, then shrink to support resolution.
            feature = getattr(self, 'value_q%d' % idx)(feature)
            feature = F.interpolate(feature, size=(h, w), mode='bilinear', align_corners=True)

            per_sample = []
            for i in range(bs):
                feature_i = feature[i].unsqueeze(0)  # (1, valdim, h, w)
                acc = None
                for j in range(ncls):
                    gcb_out = self.gcb(feature_i, attentions[j].unsqueeze(0))
                    pair = torch.cat((feature_i, gcb_out), dim=1)  # (1, 2*valdim, h, w)
                    acc = pair if acc is None else acc + pair
                # Sum of the ncls class responses; feature_i is included
                # ncls times in the sum, matching the original behaviour.
                per_sample.append(acc)
            final = torch.cat(per_sample, dim=0)  # (bs, 2*valdim, h, w)

            # Resize back up to this level's query resolution.
            final = F.interpolate(final, size=(H, W), mode='bilinear', align_corners=True)
            if self.sum:
                final = getattr(self, 'bnn%d' % idx)(final)
            output.append(final)

        if self.sum:
            # Fuse distilled maps back into the original features.
            output = [self.combine(torch.cat((features[i], output[i]), dim=1))
                      for i in range(len(output))]
        return tuple(output)


class NaiveDenseRelationDistill(nn.Module):
    """Naive distillation: sum the class support values into one map and
    concatenate it, upsampled, onto each query level.

    The support maps are projected to ``valdim`` channels, summed over the
    class dimension into a single (1, valdim, h, w) map, bilinearly resized
    to each query level's resolution, and concatenated channel-wise with
    that level's projected query feature.

    Args:
        indim: channel count of incoming query/support features.
        keydim: kept for interface compatibility with sibling heads; unused.
        valdim: projection width for query and support values.
        dense_sum: when True, apply per-level BatchNorm and fuse the result
            back into the original feature via a 1x1 ``combine`` conv.
            NOTE: the BN/combine widths are hard-coded for indim=256 and
            valdim=128 (cat doubles valdim to 256; combine sees 256+256=512).
    """

    def __init__(self, indim, keydim, valdim, dense_sum=False):
        super(NaiveDenseRelationDistill, self).__init__()
        # Projects support features down to valdim channels.
        self.value_t = nn.Conv2d(indim, valdim, kernel_size=(3, 3), padding=(1, 1), stride=1)
        self.sum = dense_sum
        # One query-value projection per pyramid level.  forward() needs
        # these regardless of dense_sum, so they are created unconditionally
        # (placing them under `if self.sum` crashed when dense_sum=False).
        # add_module keeps the original parameter names value_q0..value_q4.
        for i in range(5):
            self.add_module('value_q%d' % i,
                            nn.Conv2d(indim, valdim, kernel_size=(3, 3), padding=(1, 1), stride=1))
        if self.sum:
            self.bnn0 = nn.BatchNorm2d(256)
            self.bnn1 = nn.BatchNorm2d(256)
            self.bnn2 = nn.BatchNorm2d(256)
            self.bnn3 = nn.BatchNorm2d(256)
            self.bnn4 = nn.BatchNorm2d(256)
            self.combine = nn.Conv2d(512, 256, kernel_size=1, padding=0, stride=1)

    def forward(self, features, attentions):
        """Concatenate the summed support map onto each query level.

        Args:
            features: iterable of per-level query tensors, each shaped
                (bs, indim, H, W); typically 5 FPN levels of varying size.
            attentions: (ncls, indim, h, w) tensor of per-class support
                maps at a fixed resolution (e.g. 16x16), or a dict keyed
                0..ncls-1 of (indim, h, w) tensors.

        Returns:
            Tuple of per-level tensors (channels 2*valdim, or indim when
            ``dense_sum`` fuses them back via ``combine``).
        """
        features = list(features)
        if isinstance(attentions, dict):
            # Stack dict entries (assumed keyed 0..len-1) into one tensor.
            attentions = torch.stack(
                [attentions[i] for i in range(len(attentions))]).cuda()
        val_t = self.value_t(attentions)               # (ncls, valdim, h, w)
        sum_val_t = torch.sum(val_t, 0, keepdim=True)  # (1, valdim, h, w)

        output = []
        for idx, feature in enumerate(features):
            bs = feature.shape[0]
            H, W = feature.shape[2:]
            # Always interpolate from the ORIGINAL summed support map.
            # (The previous code overwrote sum_val_t with its upsampled
            # version, so each later level resampled an already-resampled
            # map, compounding interpolation error across levels.)
            sv = F.interpolate(sum_val_t, size=(H, W), mode='bilinear', align_corners=True)
            # Per-level query projection.
            val_q = getattr(self, 'value_q%d' % idx)(feature)
            # Broadcast the support map across the batch and concat along
            # channels; equivalent to the per-sample cat loop, batched.
            final = torch.cat((val_q, sv.expand(bs, -1, -1, -1)), dim=1)
            if self.sum:
                final = getattr(self, 'bnn%d' % idx)(final)
            output.append(final)

        if self.sum:
            # Fuse distilled maps back into the original features.
            output = [self.combine(torch.cat((features[i], output[i]), dim=1))
                      for i in range(len(output))]
        return tuple(output)


class CBAMDenseRelationDistill(nn.Module):
    """Distillation head that refines the support map with CBAM
    (channel + spatial attention) before distributing it to query levels.

    The per-class support maps are averaged into a single map, passed
    through CBAM, projected to ``valdim`` channels, then bilinearly resized
    to each query level's resolution and concatenated channel-wise with
    that level's projected query feature.

    Args:
        indim: channel count of incoming query/support features.
        keydim: kept for interface compatibility with sibling heads; unused.
        valdim: projection width for query and support values.
        dense_sum: when True, apply per-level BatchNorm and fuse the result
            back into the original feature via a 1x1 ``combine`` conv.
            NOTE: the BN/combine widths are hard-coded for indim=256 and
            valdim=128 (cat doubles valdim to 256; combine sees 256+256=512).
    """

    def __init__(self, indim, keydim, valdim, dense_sum=False):
        super(CBAMDenseRelationDistill, self).__init__()
        # Projects the CBAM-refined support map down to valdim channels.
        self.value_t = nn.Conv2d(indim, valdim, kernel_size=(3, 3), padding=(1, 1), stride=1)
        self.sum = dense_sum
        # Channel + spatial attention over the averaged support map.
        self.cbam = CBAM(indim)
        # One query-value projection per pyramid level.  forward() needs
        # these regardless of dense_sum, so they are created unconditionally
        # (placing them under `if self.sum` crashed when dense_sum=False).
        # add_module keeps the original parameter names value_q0..value_q4.
        for i in range(5):
            self.add_module('value_q%d' % i,
                            nn.Conv2d(indim, valdim, kernel_size=(3, 3), padding=(1, 1), stride=1))
        if self.sum:
            self.bnn0 = nn.BatchNorm2d(256)
            self.bnn1 = nn.BatchNorm2d(256)
            self.bnn2 = nn.BatchNorm2d(256)
            self.bnn3 = nn.BatchNorm2d(256)
            self.bnn4 = nn.BatchNorm2d(256)
            self.combine = nn.Conv2d(512, 256, kernel_size=1, padding=0, stride=1)

    def forward(self, features, attentions):
        """Concatenate the CBAM-refined support map onto each query level.

        Args:
            features: iterable of per-level query tensors, each shaped
                (bs, indim, H, W); typically 5 FPN levels of varying size.
            attentions: (ncls, indim, h, w) tensor of per-class support
                maps at a fixed resolution (e.g. 16x16), or a dict keyed
                0..ncls-1 of (indim, h, w) tensors.

        Returns:
            Tuple of per-level tensors (channels 2*valdim, or indim when
            ``dense_sum`` fuses them back via ``combine``).
        """
        features = list(features)
        if isinstance(attentions, dict):
            # Stack dict entries (assumed keyed 0..len-1) into one tensor.
            attentions = torch.stack(
                [attentions[i] for i in range(len(attentions))]).cuda()

        # Average over classes, refine with CBAM, then project down.
        sum_val_t = torch.mean(attentions, 0, keepdim=True)  # (1, indim, h, w)
        sum_val_t = self.cbam(sum_val_t)                     # (1, indim, h, w)
        sum_val_t = self.value_t(sum_val_t)                  # (1, valdim, h, w)

        output = []
        for idx, feature in enumerate(features):
            bs = feature.shape[0]
            H, W = feature.shape[2:]
            # Always interpolate from the ORIGINAL refined support map.
            # (The previous code overwrote sum_val_t with its upsampled
            # version, so each later level resampled an already-resampled
            # map, compounding interpolation error across levels.)
            sv = F.interpolate(sum_val_t, size=(H, W), mode='bilinear', align_corners=True)
            # Per-level query projection.
            val_q = getattr(self, 'value_q%d' % idx)(feature)
            # Broadcast the support map across the batch and concat along
            # channels; equivalent to the per-sample cat loop, batched.
            final = torch.cat((val_q, sv.expand(bs, -1, -1, -1)), dim=1)
            if self.sum:
                final = getattr(self, 'bnn%d' % idx)(final)
            output.append(final)

        if self.sum:
            # Fuse distilled maps back into the original features.
            output = [self.combine(torch.cat((features[i], output[i]), dim=1))
                      for i in range(len(output))]
        return tuple(output)
