import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
import pdb

class MemoryLayer(Function):
    """Autograd op: similarity logits of features against a memory bank (LUT).

    Forward returns ``inputs @ lut.T``. Backward, besides propagating the
    gradient to ``inputs``, performs an OIM-style momentum update of the LUT
    rows indexed by ``targets`` (an intentional in-place side effect on the
    memory bank).
    """

    @staticmethod
    def forward(ctx, inputs, targets, lut, alpha):
        """Compute logits.

        Args:
            inputs: (batch, feat_dim) feature matrix.
            targets: (batch,) long tensor of LUT row indices, one per input.
            alpha: momentum for the LUT update; accepts a Python float or a
                0-dim tensor.

        Returns:
            (batch, num_entries) similarity logits.
        """
        # Only real tensors go through save_for_backward; alpha carries no
        # gradient, so stash it on ctx (also lets callers pass a plain float
        # instead of allocating a tensor every step).
        ctx.save_for_backward(inputs, targets, lut)
        ctx.alpha = alpha
        return inputs.mm(lut.t())

    @staticmethod
    def backward(ctx, grad_outputs):
        inputs, targets, lut = ctx.saved_tensors
        alpha = ctx.alpha
        grad_inputs = None
        if ctx.needs_input_grad[0]:
            # Gradient w.r.t. inputs uses the LUT *before* the update below.
            grad_inputs = grad_outputs.mm(lut)
        # Momentum update of the touched memory rows, then re-normalize each
        # updated row to unit length.
        for x, y in zip(inputs, targets):
            lut[y] = alpha * lut[y] + (1. - alpha) * x
            lut[y] /= lut[y].norm()
        # No gradients for targets, lut, or alpha.
        return grad_inputs, None, None, None

def logit_cal(inputs, targets, lut, alpha):
    """Thin functional wrapper around :class:`MemoryLayer`.

    Returns the (batch, num_entries) similarity logits of *inputs* against
    the memory bank *lut*; the LUT rows for *targets* are updated during the
    backward pass.
    """
    logits = MemoryLayer.apply(inputs, targets, lut, alpha)
    return logits

class Memory(nn.Module):
    """Memory bank of per-identity features, queried to produce logits.

    Args:
        num_features: feature dimensionality.
        mem: (num_ids, num_features) tensor used as the lookup table (LUT).
        alpha: LUT momentum default. NOTE(review): currently unused —
            ``forward`` hard-codes a local ``alpha = 0.5``.
    """

    def __init__(self, num_features, mem, alpha=0.01):
        super(Memory, self).__init__()
        self.num_features = num_features
        self.alpha = alpha  # kept for compatibility; forward uses a local alpha
        self.mem = mem
        # Buffer: moves with the module across devices and is saved in
        # state_dict, but is never updated by the optimizer (the LUT is
        # updated in MemoryLayer.backward instead).
        self.register_buffer("lut", self.mem)

    def forward(self, inputs, targets, epoch=None):
        """Compute logits of RoI features against the LUT.

        Args:
            inputs: (num_rois, num_features) RoI feature matrix.
            targets: list of per-RoI label tensors (single class per RoI);
                label 0 is background.
            epoch: unused; kept for the disabled epoch-dependent alpha.

        Returns:
            ``(logits, label, inputs)`` for the background-filtered RoIs;
            when ``filter_noisy`` is enabled, additionally the noisy-id
            triple ``(logits_nid, label_nid, inputs_nid)``.
        """
        # Hard-coded momentum; the epoch schedule 0.5 * (epoch + 1) / 6 was
        # tried and disabled (alpha == 1 would mean "never update").
        alpha = 0.5
        targets = torch.cat(targets)  # labels of all RoIs, single class
        label = targets - 1  # shift so background becomes -1

        # Filter GT boxes without an ID. Deprecated path, kept disabled.
        use_gt_box = False
        if use_gt_box:
            inds_wid = label != 16000 - 1
            label = label[inds_wid]
            # Boolean row mask; assumes inputs is 2D (num_rois, num_features).
            inputs = inputs[inds_wid]

        # Drop background proposals.
        inds = label >= 0
        label = label[inds]
        inputs = inputs[inds]

        # Deprecated: noise removal should instead happen after detections
        # are obtained, by rejecting outliers of the distance matrix (those
        # outliers are labelled 30000). Kept disabled.
        filter_noisy = False
        if filter_noisy:
            # Split into clean / noisy BEFORE overwriting `label` — the
            # original computed the noisy mask on the already-filtered
            # labels, so the noisy subset was always empty.
            inds_wid = label < 30000 - 1
            inds_nid = label >= 30000 - 1
            label_nid, inputs_nid = label[inds_nid], inputs[inds_nid]
            label, inputs = label[inds_wid], inputs[inds_wid]

            logits = logit_cal(inputs, label, self.lut, torch.tensor(alpha))

            logits_nid = None
            noisy_oem = True
            if noisy_oem:
                logits_nid = logit_cal(inputs_nid, label_nid, self.lut,
                                       torch.tensor(alpha))

            return logits, label, inputs, logits_nid, label_nid, inputs_nid

        # Normal path.
        logits = logit_cal(inputs, label, self.lut, torch.tensor(alpha))
        return logits, label, inputs
