from .resnet import resnet50, resnet101

import torch
from torch import nn
import torch.nn.functional as F


class ASPPConv(nn.Sequential):
    """One dilated-convolution branch of ASPP: 3x3 atrous conv -> BN -> ReLU.

    `dilation` sets both the dilation and the padding, so the spatial
    size of the input is preserved.
    """

    def __init__(self, in_channels, out_channels, dilation):
        super(ASPPConv, self).__init__(
            nn.Conv2d(in_channels, out_channels, 3,
                      padding=dilation, dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )


class ASPPPooling(nn.Sequential):
    """Image-level (global pooling) branch of ASPP.

    Pools the input to 1x1, applies 1x1 conv -> BN -> ReLU, then
    bilinearly upsamples back to the input's spatial size, so the output
    is spatially constant per channel.
    """

    def __init__(self, in_channels, out_channels):
        layers = [
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        super(ASPPPooling, self).__init__(*layers)

    def forward(self, x):
        spatial = x.shape[-2:]
        pooled = super(ASPPPooling, self).forward(x)
        return F.interpolate(pooled, size=spatial, mode='bilinear', align_corners=False)


class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling (DeepLab v3).

    Runs a 1x1-conv branch, one 3x3 dilated-conv branch per atrous rate,
    and a global-pooling branch in parallel, then projects the
    channel-concatenated branch outputs back down to ``out_channels``.
    """

    def __init__(self, in_channels, out_channels=256, atrous_rates=(6, 12, 18)):
        """
        in_channels: channels of the input feature map
        out_channels: channels of every branch and of the final projection
        atrous_rates: dilation rate of each 3x3 branch (any length >= 1)

        Note: the default used to be a mutable list; a tuple avoids
        shared-state surprises.  Iterating over the rates also generalizes
        the original, which unpacked exactly three values.
        """
        super(ASPP, self).__init__()
        branches = [nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True))]
        for rate in atrous_rates:
            branches.append(ASPPConv(in_channels, out_channels, rate))
        branches.append(ASPPPooling(in_channels, out_channels))

        self.convs = nn.ModuleList(branches)

        # One concatenated tensor per branch -> len(branches) * out_channels
        # input channels (5 * out_channels with the default three rates).
        self.project = nn.Sequential(
            nn.Conv2d(len(branches) * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
        )

    def forward(self, x):
        """Apply every branch to ``x`` and project the concatenation."""
        res = torch.cat([conv(x) for conv in self.convs], dim=1)
        return self.project(res)



class Feature_Reweighting(nn.Module):
    """Channel attention that re-weights query features with a learnable mix
    of query self-attention and support->query cross-attention.

    Adapted from:
    https://github.com/yiskw713/DualAttention_for_Segmentation/blob/master/model/attention.py
    """

    def __init__(self):
        super(Feature_Reweighting, self).__init__()
        # Mixing weight for the cross-attention term; starts at zero so
        # training begins with pure self-attention.
        self.alpha = nn.Parameter(torch.zeros(1), requires_grad=True)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, sup_fea, que_fea):
        """
        inputs:
            sup_fea, que_fea: support / query feature maps, (N, C, H, W)
        outputs:
            query features re-weighted along the channel dimension
            (N, C, H, W), plus the current alpha parameter
        """
        n, c, h, w = sup_fea.shape

        sup_flat = sup_fea.reshape(n, c, -1)    # (N, C, H*W)
        que_flat = que_fea.reshape(n, c, -1)    # (N, C, H*W); doubles as the value
        que_flat_t = que_flat.permute(0, 2, 1)  # (N, H*W, C)

        cross_atten = sup_flat.bmm(que_flat_t)  # (N, C, C)
        self_atten = que_flat.bmm(que_flat_t)   # (N, C, C)

        # Subtract each row from its max before the softmax to prevent loss
        # divergence, see https://github.com/junfu1115/DANet/issues/9
        self_atten = self_atten.max(-1, keepdim=True)[0].expand_as(self_atten) - self_atten
        cross_atten = cross_atten.max(-1, keepdim=True)[0].expand_as(cross_atten) - cross_atten

        blended = (self.softmax(self_atten) + self.alpha * self.softmax(cross_atten)) / (1 + self.alpha)

        out = blended.bmm(que_flat).view(n, c, h, w) + que_fea
        return out, self.alpha


class CPMT(nn.Module):
    """Few-shot segmentation network on a ResNet backbone.

    Support and query images share the backbone; its first two stages are
    frozen (run under ``torch.no_grad``).  Foreground/background prototypes
    are masked-average-pooled from the support features at two levels —
    mid-level (layer2+layer3, channel-reduced) and high-level (layer4) —
    and the query is classified per pixel by cosine similarity against
    each prototype pair.
    """

    def __init__(self, backbone, shot=1, pretrained=True):
        """
        backbone: backbone name, one of "resnet50" / "resnet101"
        shot: number of support images per episode
        pretrained: forwarded to the backbone constructor
        """
        super(CPMT, self).__init__()
        # Resolve the backbone by explicit lookup instead of eval(): eval
        # would execute arbitrary code if the name ever came from a config
        # file or the command line.
        constructors = {"resnet50": resnet50, "resnet101": resnet101}
        if backbone not in constructors:
            raise ValueError("unsupported backbone: %r" % (backbone,))
        net = constructors[backbone](pretrained=pretrained)

        self.layer0 = nn.Sequential(net.conv1, net.bn1, net.relu, net.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = net.layer1, net.layer2, net.layer3, net.layer4
        self.shot = shot

        fea_dim = 512 + 1024  # layer2 + layer3 output channels
        reduce_dim = 256

        # 1x1 reductions for the concatenated mid-level features of the
        # query and support branches respectively.
        self.down_query = nn.Sequential(
            nn.Conv2d(fea_dim, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5)
        )
        self.down_supp = nn.Sequential(
            nn.Conv2d(fea_dim, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5)
        )

    def forward(self, img_s_list, mask_s_list, img_q, mask_q):
        """
        img_s_list: list of ``shot`` support images, each (B, 3, H, W)
        mask_s_list: list of ``shot`` support masks, each (B, H, W)
        img_q: query images, (B, 3, H, W)
        mask_q: query masks (unused here; kept for interface compatibility)

        Returns [mid-level logits, high-level logits] while training,
        otherwise only the mid-level logits, each (B, 2, H, W).
        """
        h, w = img_q.shape[-2:]

        # Extract support features; early stages are frozen.
        supp_list_3 = []  # reduced mid-level features, one per shot
        supp_list_4 = []  # high-level (layer4) features, one per shot
        for img_s in img_s_list:
            with torch.no_grad():
                s_1 = self.layer1(self.layer0(img_s))
            s_2 = self.layer2(s_1)
            s_3 = self.layer3(s_2)
            supp_list_4.append(self.layer4(s_3))
            supp_list_3.append(self.down_supp(torch.cat([s_2, s_3], 1)))

        # Extract query features the same way.
        with torch.no_grad():
            q_1 = self.layer1(self.layer0(img_q))
        q_2 = self.layer2(q_1)
        q_3 = self.layer3(q_2)
        query_feat_4 = self.layer4(q_3)
        query_feat_3 = self.down_query(torch.cat([q_2, q_3], 1))

        # Prototype matching at both feature levels.
        out_0 = self._prototype_logits(supp_list_3, mask_s_list, query_feat_3, (h, w))
        out_1 = self._prototype_logits(supp_list_4, mask_s_list, query_feat_4, (h, w))

        if self.training:
            return [out_0, out_1]
        return out_0

    def _prototype_logits(self, supp_feats, mask_s_list, query_feat, out_size):
        """Pool fg/bg prototypes over all shots and score the query.

        supp_feats: list of support feature maps, one per shot
        mask_s_list: matching list of support masks (values 0/1)
        query_feat: query feature map at the same level
        out_size: (h, w) to upsample the 2-channel logits to
        """
        feature_fg_list = []
        feature_bg_list = []
        for feat, mask in zip(supp_feats, mask_s_list):
            feature_fg_list.append(self.masked_average_pooling(feat, (mask == 1).float())[None, :])
            feature_bg_list.append(self.masked_average_pooling(feat, (mask == 0).float())[None, :])

        # Average the per-shot vectors into a single (B, C, 1, 1) prototype.
        FP = torch.mean(torch.cat(feature_fg_list, dim=0), dim=0).unsqueeze(-1).unsqueeze(-1)
        BP = torch.mean(torch.cat(feature_bg_list, dim=0), dim=0).unsqueeze(-1).unsqueeze(-1)

        out = self.similarity_func(query_feat, FP, BP)
        return F.interpolate(out, size=out_size, mode="bilinear", align_corners=True)

    def similarity_func(self, feature_q, fg_proto, bg_proto):
        """Segment by cosine similarity to the prototypes.

        feature_q: query features, (B, C, h, w)
        fg_proto / bg_proto: foreground / background prototypes, (B, C, 1, 1)
        Returns (B, 2, h, w) logits ordered [background, foreground],
        scaled by 10 to sharpen the softmax.
        """
        similarity_fg = F.cosine_similarity(feature_q, fg_proto, dim=1)
        similarity_bg = F.cosine_similarity(feature_q, bg_proto, dim=1)

        out = torch.cat((similarity_bg[:, None, ...], similarity_fg[:, None, ...]), dim=1) * 10.0
        return out

    def masked_average_pooling(self, feature, mask):
        """Average ``feature`` over the spatial positions selected by ``mask``.

        feature: (B, C, h, w)
        mask: (B, H, W), resized here to the feature's spatial size
        Returns the (B, C) prototype vector; the 1e-5 in the denominator
        guards against an all-zero mask.
        """
        mask = F.interpolate(mask.unsqueeze(1), size=feature.shape[-2:], mode='bilinear', align_corners=True)
        masked_feature = torch.sum(feature * mask, dim=(2, 3)) / (mask.sum(dim=(2, 3)) + 1e-5)
        return masked_feature
