import torch
import torch.nn as nn
import torch.nn.functional as F


class MemoryBank(nn.Module):
    """Exponential-moving-average memory bank of (feature, saliency) pairs.

    During training (``target`` and ``video_idx`` both supplied) each call
    stores or EMA-updates a flattened feature and its saliency map in one of
    ``num_slots`` slots (videos are grouped by ``video_idx % num_slots``).
    At inference the bank is scanned; when a stored feature's cosine
    similarity to the query falls below ``sim_threshold``, the stored
    saliency map is blended into the query.

    Args:
        num_slots: number of memory slots (one group per ``video_idx % num_slots``).
        momentum: EMA decay factor for stored memories.
        sim_threshold: cosine-similarity threshold below which a stored
            saliency map is blended into the query at inference.
        blend: blending weight of the stored saliency map at inference.
    """

    def __init__(self, num_slots=10, momentum=0.995, sim_threshold=0.3, blend=0.05):
        super(MemoryBank, self).__init__()
        self.num_slots = num_slots
        self.momentum = momentum
        self.sim_threshold = sim_threshold
        self.blend = blend
        self.memory_bank = []     # list of (1, N) feature EMAs
        self.saliency_bank = []   # list of (1, N) saliency EMAs

    def cosine_similarity(self, tensor_1, tensor_2):
        """Return the scalar cosine similarity between the flattened tensors.

        Both operands are flattened first: stored memories are (1, N) while
        queries arrive as (N,), and normalizing a (1, N) tensor along dim=0
        would reduce it to its element-wise signs instead of a unit vector.
        """
        v1 = F.normalize(tensor_1.reshape(-1), p=2, dim=0)
        v2 = F.normalize(tensor_2.reshape(-1), p=2, dim=0)
        return torch.sum(v1 * v2)

    def add(self, x, target=None, video_idx=-1):
        """Store a memory (training) or blend a stored saliency map in (inference).

        Args:
            x: 1-D feature tensor for one batch element.
            target: 1-D saliency tensor; presence (with a valid ``video_idx``)
                selects the training/storage path.
            video_idx: index of the current video; ``-1`` means inference.

        Returns:
            The (possibly blended) feature, reshaped to match ``x``.
        """
        if target is not None and video_idx != -1:
            target = target.view(1, 1, 1, -1)
            x = x.view(1, 1, 1, -1)
            # Resize the saliency map so its length matches the feature's.
            target = F.interpolate(target, x.size()[-2:], mode='bilinear', align_corners=True)
            target = target.view(1, -1)
            x = x.view(1, -1)
            true_idx = video_idx % self.num_slots
            if len(self.memory_bank) <= true_idx:
                # NOTE(review): append assumes slots are first filled in
                # increasing video_idx order so the append position matches
                # true_idx — TODO confirm with the training loop.
                # detach/clone so the bank never retains the autograd graph.
                self.memory_bank.append(x.detach().clone())
                self.saliency_bank.append(target.detach().clone())
            else:
                m = self.momentum
                self.memory_bank[true_idx] = self.memory_bank[true_idx] * m + (1 - m) * x.detach()
                self.saliency_bank[true_idx] = self.saliency_bank[true_idx] * m + (1 - m) * target.detach()
            return x

        # Inference: blend in the saliency map of the first sufficiently
        # dissimilar memory; otherwise pass the query through unchanged.
        for feature, saliency in zip(self.memory_bank, self.saliency_bank):
            if self.cosine_similarity(x, feature) < self.sim_threshold:
                # Reshape to x's shape so forward() can stack batch results
                # uniformly (saliency is (1, N) while x is (N,)).
                return (self.blend * saliency + (1 - self.blend) * x).view(x.size())
        return x

    def forward(self, x, target=None, video_idx=-1):
        """Apply :meth:`add` per batch element and restore ``x``'s shape."""
        xx = x.view(x.size(0), -1)
        if target is not None and video_idx != -1:
            tt = target.view(target.size(0), -1)
            re_batch = [self.add(xx[b], tt[b], video_idx) for b in range(x.size(0))]
        else:
            re_batch = [self.add(xx[b]) for b in range(x.size(0))]
        return torch.stack(re_batch, dim=0).view(x.size())


if __name__ == '__main__':
    # Smoke test: feed the same random clip 20 times through the module so
    # every slot is first created and then EMA-updated.
    # (The original called mm.update(x, x), a method that does not exist on
    # MemoryBank, and so raised AttributeError; the module's __call__ /
    # forward is the actual entry point.)
    x = torch.randn((1, 768, 16, 12, 7))
    mm = MemoryBank()
    for i in range(20):
        mm(x, x, i)
    print(mm.memory_bank)