from .resnet import resnet50, resnet101

import torch
from torch import nn
import torch.nn.functional as F



class PANet(nn.Module):
    """Prototype Alignment Network (PANet) for few-shot semantic segmentation.

    A ResNet trunk extracts features for the support and query images (the
    early stages run under ``torch.no_grad`` and stay frozen).  Foreground and
    background prototypes are masked-average-pooled from the support features,
    and the query is segmented by cosine similarity to those prototypes.
    During training, Prototype Alignment Regularization (PAR) additionally
    segments each support image using prototypes pooled from the query.
    """

    def __init__(self, backbone, shot=1, pretrained=True):
        """
        Args:
            backbone: backbone name, one of ``'resnet50'`` / ``'resnet101'``.
            shot: number of support images per episode.
            pretrained: load pretrained backbone weights.

        Raises:
            ValueError: if ``backbone`` is not a supported name.
        """
        super(PANet, self).__init__()
        # Explicit dispatch table instead of eval(): eval() would execute
        # arbitrary code from the config string and fail opaquely on typos.
        backbones = {'resnet50': resnet50, 'resnet101': resnet101}
        try:
            net = backbones[backbone](pretrained=pretrained)
        except KeyError:
            raise ValueError(
                f"unsupported backbone '{backbone}', expected one of {sorted(backbones)}"
            ) from None
        # Keep only the stem and the first three stages of the backbone.
        self.layer0 = nn.Sequential(net.conv1, net.bn1, net.relu, net.maxpool)
        self.layer1, self.layer2, self.layer3 = net.layer1, net.layer2, net.layer3
        self.shot = shot

    def forward(self, img_s_list, mask_s_list, img_q, mask_q):
        """Segment the query image using prototypes pooled from the support set.

        Args:
            img_s_list: support images, list of ``shot`` tensors [B, 3, H, W].
            mask_s_list: support masks, list of ``shot`` tensors [B, H, W]
                (1 = foreground, 0 = background).
            img_q: query images [B, 3, H, W].
            mask_q: query masks [B, H, W]; only used during training for PAR.

        Returns:
            Training: ``(out_0, out_ls)`` where ``out_0`` is the query
            segmentation logits [B, 2, H, W] and ``out_ls`` is a list of
            per-shot support segmentation logits from PAR.
            Evaluation: ``out_0`` only.
        """
        h, w = img_q.shape[-2:]

        # Support features; the stem and stage 1 are frozen (no gradients).
        feature_s_list = []
        for img_s in img_s_list:
            with torch.no_grad():
                s = self.layer0(img_s)
                s = self.layer1(s)
            s = self.layer2(s)
            feature_s_list.append(self.layer3(s))

        # Query features, extracted the same way.
        with torch.no_grad():
            q = self.layer0(img_q)
            q = self.layer1(q)
        q = self.layer2(q)
        feature_q = self.layer3(q)  # [B, C, h', w']

        # Foreground / background prototypes pooled from each support shot;
        # [None, :] adds a leading shot axis for the concatenation below.
        feature_fg_list = []
        feature_bg_list = []
        for feature_s, mask_s in zip(feature_s_list, mask_s_list):
            feature_fg_list.append(
                self.masked_average_pooling(feature_s, (mask_s == 1).float())[None, :])
            feature_bg_list.append(
                self.masked_average_pooling(feature_s, (mask_s == 0).float())[None, :])

        # Average over shots -> [B, C, 1, 1] foreground/background prototypes.
        FP = torch.mean(torch.cat(feature_fg_list, dim=0), dim=0).unsqueeze(-1).unsqueeze(-1)
        BP = torch.mean(torch.cat(feature_bg_list, dim=0), dim=0).unsqueeze(-1).unsqueeze(-1)

        # Coarse query segmentation from similarity to the prototypes,
        # upsampled back to the input resolution.
        out_0 = self.similarity_func(feature_q, FP, BP)  # [B, 2, h', w']
        out_0 = F.interpolate(out_0, size=(h, w), mode="bilinear", align_corners=True)

        # Prototype Alignment Regularization (PAR), training only: swap the
        # roles — pool prototypes from the query and segment each support image.
        if self.training:
            fg_q = self.masked_average_pooling(feature_q, (mask_q == 1).float())  # [B, C]
            bg_q = self.masked_average_pooling(feature_q, (mask_q == 0).float())  # [B, C]

            out_ls = []
            # NOTE(review): iterates range(self.shot) while the pooling above
            # iterates len(img_s_list) — assumed equal; confirm with callers.
            for i in range(self.shot):
                self_out = self.similarity_func(
                    feature_s_list[i], fg_q[..., None, None], bg_q[..., None, None])
                self_out = F.interpolate(self_out, size=(h, w), mode="bilinear", align_corners=True)
                out_ls.append(self_out)

            return out_0, out_ls

        return out_0

    def similarity_func(self, feature_q, fg_proto, bg_proto):
        """Segment by cosine similarity to the two prototypes.

        Args:
            feature_q: query features [B, C, h, w].
            fg_proto: foreground prototype [B, C, 1, 1].
            bg_proto: background prototype [B, C, 1, 1].

        Returns:
            Logits [B, 2, h, w]; channel 0 = background, channel 1 =
            foreground.  The x10 factor sharpens the softmax, as in PANet.
        """
        similarity_fg = F.cosine_similarity(feature_q, fg_proto, dim=1)
        similarity_bg = F.cosine_similarity(feature_q, bg_proto, dim=1)
        out = torch.cat((similarity_bg[:, None, ...], similarity_fg[:, None, ...]), dim=1) * 10.0
        return out

    def masked_average_pooling(self, feature, mask):
        """Pool a prototype vector from the masked region of a feature map.

        Args:
            feature: feature map [B, C, h, w].
            mask: binary mask [B, H, W] at input resolution; it is resized
                down to the feature resolution before pooling.

        Returns:
            Prototype [B, C]; the epsilon guards against an empty mask.
        """
        mask = F.interpolate(mask.unsqueeze(1), size=feature.shape[-2:],
                             mode='bilinear', align_corners=True)
        masked_feature = torch.sum(feature * mask, dim=(2, 3)) / (mask.sum(dim=(2, 3)) + 1e-5)
        return masked_feature
