from .resnet import resnet50, resnet101

import torch
from torch import nn
import torch.nn.functional as F


class MSANet(nn.Module):
    """Few-shot semantic segmentation network.

    A dilated ResNet backbone extracts support/query features, a
    training-free prior mask is computed from the high-level features, and a
    multi-scale Feature Enrichment Module (FEM) fuses pooled query features,
    the support prototype and the prior mask into the final prediction.

    Args:
        backbone (str): backbone constructor name, 'resnet50' or 'resnet101'.
        shot (int): number of support images per episode.
        pretrained (bool): load ImageNet-pretrained backbone weights.
        ppm_scales (sequence of int): spatial bin sizes of the feature pyramid.
        classes (int): number of segmentation classes (background + foreground).
    """

    def __init__(self, backbone, shot=1, pretrained=True, ppm_scales=(60, 30, 15, 8), classes=2):
        super(MSANet, self).__init__()
        # NOTE(review): eval() on the backbone name executes arbitrary code if
        # the string is untrusted; an explicit {'resnet50': resnet50, ...}
        # dispatch dict would be safer.
        backbone = eval(backbone)(pretrained=pretrained)
        self.layer0 = nn.Sequential(backbone.conv1, backbone.bn1, backbone.relu, backbone.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = \
            backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4
        self.shot = shot
        self.ppm_scales = ppm_scales

        # Turn the layer3/layer4 strides into dilations so the feature maps
        # keep their spatial resolution while the receptive field grows.
        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 512 + 1024  # channels of concatenated layer2 + layer3 outputs
        reduce_dim = 256      # common working channel width

        # 1x1 reductions of the mid-level query / support features.
        self.down_query = nn.Sequential(
            nn.Conv2d(fea_dim, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5)
        )
        self.down_supp = nn.Sequential(
            nn.Conv2d(fea_dim, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5)
        )

        # Final classification head.
        self.cls = nn.Sequential(
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.1),
            nn.Conv2d(reduce_dim, classes, kernel_size=1)
        )

        self.pyramid_bins = ppm_scales
        # Parameter-free pooling, one per pyramid bin (a plain list suffices:
        # AdaptiveAvgPool2d holds no parameters or buffers to register).
        self.avgpool_list = [nn.AdaptiveAvgPool2d(scale) for scale in self.pyramid_bins]

        mask_add_num = 1  # one extra input channel for the prior mask
        init_merge = []
        beta_conv = []
        inner_cls = []
        for _ in self.pyramid_bins:
            # Merge query features + tiled prototype + prior mask -> reduce_dim.
            init_merge.append(nn.Sequential(
                nn.Conv2d(reduce_dim * 2 + mask_add_num, reduce_dim, kernel_size=1, padding=0, bias=False),
                nn.ReLU(inplace=True),
            ))
            # Per-bin residual refinement.
            beta_conv.append(nn.Sequential(
                nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
                nn.ReLU(inplace=True),
                nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
                nn.ReLU(inplace=True)
            ))
            # Per-bin auxiliary classifier for deep supervision.
            inner_cls.append(nn.Sequential(
                nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
                nn.ReLU(inplace=True),
                nn.Dropout2d(p=0.1),
                nn.Conv2d(reduce_dim, classes, kernel_size=1)
            ))
        self.init_merge = nn.ModuleList(init_merge)
        self.beta_conv = nn.ModuleList(beta_conv)
        self.inner_cls = nn.ModuleList(inner_cls)

        # Fuse the concatenated per-bin features back to a single map.
        self.res1 = nn.Sequential(
            nn.Conv2d(reduce_dim * len(self.pyramid_bins), reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
        )
        self.res2 = nn.Sequential(
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
        )

        self.GAP = nn.AdaptiveAvgPool2d(1)

        # Inter-bin fusion convs (bin i also receives bin i-1's features).
        # Written symbolically (reduce_dim*2 == 512, reduce_dim == 256) for
        # consistency with the other module definitions above.
        alpha_conv = []
        for _ in range(len(self.pyramid_bins) - 1):
            alpha_conv.append(nn.Sequential(
                nn.Conv2d(reduce_dim * 2, reduce_dim, kernel_size=1, stride=1, padding=0, bias=False),
                nn.ReLU()
            ))
        self.alpha_conv = nn.ModuleList(alpha_conv)

    def forward(self, img_s_list, mask_s_list, img_q, mask_q):
        """Run one few-shot segmentation episode.

        Args:
            img_s_list: list (length ``shot``) of support images [B, 3, H, W].
            mask_s_list: list (length ``shot``) of support masks [B, H, W].
            img_q: query images [B, 3, H, W].
            mask_q: query masks [B, H, W]; unused here, kept so the training
                loop's call signature stays unchanged.

        Returns:
            ``out`` [B, classes, H, W] logits; during training additionally
            the list of per-bin auxiliary predictions.
        """
        h, w = img_q.shape[-2:]

        # Step 1: backbone features for support and query.
        supp_feat_list, final_supp_list, query_feat, final_query_feat = self.extra_feature(img_s_list, img_q)

        # Step 2: coarse training-free prior mask for the query.
        corr_query_mask = self.free_prior_mask(final_supp_list, mask_s_list, final_query_feat)

        # Step 3: multi-scale feature enrichment and classification.
        out, out_list = self.FEM(supp_feat_list, query_feat, corr_query_mask, mask_s_list)

        # Upsample logits to the input resolution.
        out = F.interpolate(out, size=(h, w), mode='bilinear', align_corners=True)

        if self.training:
            return out, out_list
        return out

    def extra_feature(self, img_s_list, img_q):
        """Step 1: extract backbone features for support and query images.

        The backbone runs under ``torch.no_grad()`` (frozen); only the 1x1
        reduction layers (`down_supp`/`down_query`) receive gradients.

        Returns:
            supp_feat_list: shot x [B, 256, h, w] reduced mid-level support features.
            final_supp_list: shot x [B, 2048, h, w] high-level support features.
            query_feat: [B, 256, h, w] reduced mid-level query features.
            final_query_feat: [B, 2048, h, w] high-level query features.
        """
        supp_feat_list = []
        final_supp_list = []

        # Support branch, one pass per shot.
        for img_s in img_s_list:
            with torch.no_grad():
                s_0 = self.layer0(img_s)
                s_1 = self.layer1(s_0)
                s_2 = self.layer2(s_1)
                s_3 = self.layer3(s_2)
                s_4 = self.layer4(s_3)

            final_supp_list.append(s_4)  # high-level semantics, used for the prior mask

            # Mid-level semantics: concat layer2 + layer3, reduce to 256 channels.
            supp_feat_list.append(self.down_supp(torch.cat([s_2, s_3], 1)))

        # Query branch.
        with torch.no_grad():
            q_0 = self.layer0(img_q)
            q_1 = self.layer1(q_0)
            q_2 = self.layer2(q_1)
            q_3 = self.layer3(q_2)
            final_query_feat = self.layer4(q_3)

        query_feat = self.down_query(torch.cat([q_2, q_3], 1))

        return supp_feat_list, final_supp_list, query_feat, final_query_feat

    def free_prior_mask(self, final_supp_list, mask_s_list, final_query_feat):
        """Step 2: compute a coarse, training-free prior mask for the query.

        For each support image, the foreground-masked high-level support
        features are compared against every query location by cosine
        similarity; the max over support locations gives each query pixel a
        foreground score, which is min-max normalised per image. Scores are
        averaged over shots.

        Args:
            final_supp_list: shot x [B, C, h, w] high-level support features.
            mask_s_list: shot x [B, H, W] support foreground masks.
            final_query_feat: [B, C, h, w] high-level query features.

        Returns:
            Prior mask [B, 1, h, w] with values in [0, 1].
        """
        corr_query_mask_list = []
        cosine_eps = 1e-7
        for i, tmp_supp_feat in enumerate(final_supp_list):
            # Resize the support mask to feature resolution and zero out
            # background support features.
            tmp_mask = F.interpolate(mask_s_list[i].unsqueeze(1).float(), size=tmp_supp_feat.shape[-2:],
                                     mode='bilinear', align_corners=True)
            tmp_supp_feat_4 = tmp_supp_feat * tmp_mask

            q = final_query_feat
            s = tmp_supp_feat_4
            bsize, ch_sz, h, w = q.size()[:]

            tmp_query = q.contiguous().view(bsize, ch_sz, -1)    # [B, C, hw]
            tmp_query_norm = torch.norm(tmp_query, 2, 1, True)   # [B, 1, hw] per-pixel L2 norms

            tmp_supp = s.contiguous().view(bsize, ch_sz, -1)
            tmp_supp = tmp_supp.contiguous().permute(0, 2, 1)    # [B, hw, C]
            tmp_supp_norm = torch.norm(tmp_supp, 2, 2, True)     # [B, hw, 1] per-pixel L2 norms

            # Cosine similarity between every support and every query location.
            similarity = torch.bmm(tmp_supp, tmp_query) / (torch.bmm(tmp_supp_norm, tmp_query_norm) + cosine_eps)
            # Best-matching support location per query pixel.
            similarity = similarity.max(1)[0].view(bsize, h * w)
            # Min-max normalise per image to [0, 1].
            similarity = (similarity - similarity.min(1)[0].unsqueeze(1)) / \
                (similarity.max(1)[0].unsqueeze(1) - similarity.min(1)[0].unsqueeze(1) + cosine_eps)
            corr_query = similarity.view(bsize, 1, h, w)
            corr_query = F.interpolate(corr_query, size=(final_query_feat.size()[2], final_query_feat.size()[3]),
                                       mode='bilinear', align_corners=True)
            corr_query_mask_list.append(corr_query)

        # Average over shots: shot x [B, 1, h, w] -> [B, 1, h, w].
        corr_query_mask = torch.cat(corr_query_mask_list, 1).mean(1).unsqueeze(1)
        corr_query_mask = F.interpolate(corr_query_mask, size=(final_query_feat.size(2), final_query_feat.size(3)),
                                        mode='bilinear', align_corners=True)

        return corr_query_mask

    def FEM(self, supp_feat_list, query_feat, corr_query_mask, mask_s_list):
        """Step 3: Feature Enrichment Module.

        Builds a support prototype (masked GAP averaged over shots), tiles it
        over every pyramid bin, merges it with pooled query features and the
        prior mask, and fuses the bins sequentially.

        Args:
            supp_feat_list: shot x [B, 256, h, w] mid-level support features.
            query_feat: [B, 256, h, w] mid-level query features.
            corr_query_mask: [B, 1, h, w] coarse prior mask.
            mask_s_list: shot x [B, H, W] support masks.

        Returns:
            out: [B, classes, h, w] main logits.
            out_list: per-bin auxiliary logits for deep supervision.
        """
        # Support prototype via masked global average pooling, averaged over
        # all shots. BUG FIX: the original only computed the prototype when
        # shot > 1, raising NameError on `supp_feat` in the 1-shot case.
        h, w = supp_feat_list[0].shape[-2:]
        mask = F.interpolate(mask_s_list[0].unsqueeze(1).float(), size=(h, w), mode='bilinear', align_corners=True)
        supp_feat = self.Weighted_GAP(supp_feat_list[0], mask)  # [B, 256, 1, 1]
        for i in range(1, len(supp_feat_list)):
            mask = F.interpolate(mask_s_list[i].unsqueeze(1).float(), size=(h, w), mode='bilinear', align_corners=True)
            supp_feat = supp_feat + self.Weighted_GAP(supp_feat_list[i], mask)
        supp_feat = supp_feat / len(supp_feat_list)

        out_list = []
        pyramid_feat_list = []

        for idx, scale in enumerate(self.pyramid_bins):
            # Pool query features to the bin size; tile the prototype and
            # resize the prior mask to match.
            query_feat_bin = self.avgpool_list[idx](query_feat)          # [B, 256, s, s]
            supp_feat_bin = supp_feat.expand(-1, -1, scale, scale)       # [B, 256, s, s]
            corr_mask_bin = F.interpolate(corr_query_mask, size=(scale, scale), mode='bilinear', align_corners=True)

            # [B, 256+256+1, s, s] -> [B, 256, s, s]
            merge_feat_bin = torch.cat([query_feat_bin, supp_feat_bin, corr_mask_bin], 1)
            merge_feat_bin = self.init_merge[idx](merge_feat_bin)

            # Inject the previous bin's features (inter-bin fusion).
            if idx >= 1:
                pre_feat_bin = pyramid_feat_list[idx - 1].clone()
                pre_feat_bin = F.interpolate(pre_feat_bin, size=(scale, scale), mode='bilinear', align_corners=True)
                rec_feat_bin = torch.cat([merge_feat_bin, pre_feat_bin], 1)
                merge_feat_bin = self.alpha_conv[idx - 1](rec_feat_bin) + merge_feat_bin

            # Residual refinement and auxiliary per-bin prediction.
            merge_feat_bin = self.beta_conv[idx](merge_feat_bin) + merge_feat_bin
            inner_out_bin = self.inner_cls[idx](merge_feat_bin)  # [B, classes, s, s]

            # Back to the query feature resolution for concatenation.
            merge_feat_bin = F.interpolate(merge_feat_bin, size=(query_feat.size(2), query_feat.size(3)),
                                           mode='bilinear', align_corners=True)

            pyramid_feat_list.append(merge_feat_bin)
            out_list.append(inner_out_bin)

        # Concatenate all bins, fuse, refine residually, classify.
        query_feat = torch.cat(pyramid_feat_list, 1)
        query_feat = self.res1(query_feat)
        query_feat = self.res2(query_feat) + query_feat
        out = self.cls(query_feat)

        return out, out_list

    def Weighted_GAP(self, supp_feat, mask):
        """Masked global average pooling.

        Averages ``supp_feat`` over the foreground region given by ``mask``
        (values in [0, 1]); the small epsilon guards against empty masks.

        Returns:
            Prototype tensor [B, C, 1, 1].
        """
        supp_feat = supp_feat * mask
        feat_h, feat_w = supp_feat.shape[-2:]
        # Foreground area per image (in pixels), epsilon-protected.
        area = F.avg_pool2d(mask, (feat_h, feat_w)) * feat_h * feat_w + 0.0005
        supp_feat = F.avg_pool2d(input=supp_feat, kernel_size=(feat_h, feat_w)) * feat_h * feat_w / area
        return supp_feat
