def train_val_memory(
        args,
        model,
        prev_model,
        train_dataset,
        dev_dataset,
        test_dataset,
        proto_features,
        task_prototypes,
        task_proto_features,
        data_dir
):
    """Fine-tune (encoder, classifier) on the memory/replay set of a task.

    Each batch optimizes two objectives:
      * a contrastive loss against the class prototype features, and
      * a replay (distillation) loss against the frozen previous-task model.
    After every epoch the prototypes are re-estimated from the current encoder
    and interpolated with the fixed task-level prototypes via ``args.beta``.

    Args:
        args: hyper-parameter namespace (lrs, batch_size, beta, device, ...).
        model: ``(encoder, classifier)`` pair being trained.
        prev_model: frozen ``(encoder, classifier)`` from the previous task,
            used only to produce distillation targets.
        train_dataset / dev_dataset / test_dataset: datasets for this task.
        proto_features: prototype feature matrix fed to the contrastive loss.
        task_prototypes / task_proto_features: per-task prototype anchors
            mixed back in after each epoch.
        data_dir: forwarded to ``get_prototypes``.

    Returns:
        The encoder that achieved the best test micro-F1 (see aliasing note).
    """
    enc, cls = model
    # NOTE(review): plain aliasing, not a copy — ``best_encoder`` always
    # refers to the live ``enc`` object, so later epochs keep mutating it.
    # The checkpoint saved to disk below is the faithful "best" snapshot.
    best_encoder = enc
    prev_enc, prev_cls = prev_model
    dataloader = DataLoader(train_dataset,
                            batch_size=args.batch_size,
                            shuffle=True,
                            drop_last=True)
    # Separate learning rates for the encoder and the classifier head.
    optimizer = AdamW([
        {'params': enc.parameters(), 'lr': args.encoder_lr},
        {'params': cls.parameters(), 'lr': args.classifier_lr}
    ],
        eps=args.adam_eps,
        no_deprecation_warning=True)

    # Previous-task model stays frozen: eval mode here, torch.no_grad() at
    # its call sites below.
    prev_enc.eval()
    prev_cls.eval()

    best_micro_f1 = -1
    best_macro_f1 = 0  # NOTE(review): assigned but never updated or read below
    loss_num = 0
    loss_total_con = 0
    loss_total_cls = 0
    for epoch in range(args.current_memory_epoch):
        enc.train()
        cls.train()
        bar = tqdm(dataloader, desc="Memory Epoch {}".format(epoch))
        for step, batch in enumerate(bar):
            # Move every tensor of the batch onto the training device.
            for i in range(len(batch)):
                batch[i] = batch[i].to(args.device)
            hidden, feature = enc(
                input_ids=batch[0],
                attention_mask=batch[1])
            # Distillation targets from the frozen previous-task encoder.
            with torch.no_grad():
                prev_hidden, prev_feature = prev_enc(
                    input_ids=batch[0],
                    attention_mask=batch[1])
            # Assumes batch layout (input_ids, attention_mask, ?, labels)
            # — TODO confirm against the dataset definition.
            labels = batch[3]
            loss_con = contrastive_loss(args=args,
                                        feature=feature,
                                        labels=labels,
                                        proto_features=proto_features,
                                        prev_feature=prev_feature)
            # retain_graph: ``loss_cls.backward()`` below backprops through
            # ``hidden`` from the same encoder forward pass, so the graph
            # must survive this first backward call.
            loss_con.backward(retain_graph=True)
            loss_cls = replay_loss(
                args=args,
                cls=cls,
                prev_cls=prev_cls,
                hidden=hidden,
                prev_hidden=prev_hidden,
                labels=labels,
                prototypes=None,
                )
            # Gradients from both losses accumulate before the single step().
            loss_cls.backward()
            loss_num += 1
            loss_total_con += loss_con.item()
            loss_total_cls += loss_cls.item()
            bar.set_description("epoch:{}, loss_con:{:.4f}, mean:{:.4f}, loss_cls:{:.4f}, mean:{:.4f}".
                                format(epoch, loss_con.item(), loss_total_con/loss_num,
                                       loss_cls.item(), loss_total_cls/loss_num))


            # zero_grad() after step() clears gradients for the next batch.
            # NOTE(review): gradients left over from earlier training would
            # leak into the first step — confirm callers zero them beforehand.
            optimizer.step()
            optimizer.zero_grad()

        # Per-epoch dev-set evaluation (logged only; not used for selection).
        macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(
            args=args,
            model=enc,
            classifier=cls,
            valdata=dev_dataset,
            proto_features=proto_features,
            tsne=False)
        with open(args.report_file, "a") as f:
            f.write(
                "dev: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(epoch, macro_f1["f1"], micro_f1["f1"]))
        # NOTE(review): model selection below uses the TEST split's micro-F1,
        # not the dev split — confirm this is intentional.
        macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(
            args=args,
            model=enc,
            classifier=cls,
            valdata=test_dataset,
            proto_features=proto_features,
            tsne=False)
        if micro_f1["f1"] >= best_micro_f1:
            best_encoder = enc
            best_micro_f1 = micro_f1["f1"]
            # ``output_dir`` is not defined in this function — presumably a
            # module-level global; TODO confirm it is set before this runs.
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            torch.save(enc.state_dict(), os.path.join(output_dir, "enc_.pt"))
            torch.save(cls.state_dict(), os.path.join(output_dir, "cls_.pt"))
            logger.info("save model to {}".format(output_dir))

        with open(args.report_file, "a") as f:
            f.write(
                "test: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(epoch, macro_f1["f1"], micro_f1["f1"]))
            f.write("\n")

        # Re-estimate prototypes from the updated encoder, then interpolate
        # with the fixed task-level prototypes (EMA-style mix via args.beta).
        # ``labels_list`` is likewise not defined locally — presumably a
        # module-level global; TODO confirm.
        proto_hidden, proto_features = get_prototypes(
            args=args,
            model=enc,
            labels_list=labels_list,
            train_set=train_dataset,
            data_dir=data_dir,
        )
        proto_hidden, proto_features = \
            (1 - args.beta) * task_prototypes + args.beta * proto_hidden, \
            (1 - args.beta) * task_proto_features + args.beta * proto_features

        # Re-normalize: layer norm for hidden-space prototypes, L2 for features.
        proto_hidden = F.layer_norm(proto_hidden, [args.hidden_dim])
        proto_features = F.normalize(proto_features, p=2, dim=1)

    with open(args.report_file, "a") as f:
        f.write("memory best_micro_f1: {:.4f}\n".format(best_micro_f1))
        f.write(
            "====================================\n")
    return best_encoder



def compute_cos_sim(tensor_a, tensor_b):
    """Pairwise cosine similarity between the rows of two matrices.

    Args:
        tensor_a: float tensor of shape [k, d].
        tensor_b: float tensor of shape [n, d].

    Returns:
        [k, n] tensor where entry (i, j) is the cosine similarity between
        row i of ``tensor_a`` and row j of ``tensor_b``.
    """
    dots = tensor_a @ tensor_b.T                    # [k, n] raw inner products
    len_a = tensor_a.norm(dim=1, keepdim=True)      # [k, 1] row norms
    len_b = tensor_b.norm(dim=1, keepdim=True).T    # [1, n] row norms
    # Broadcasting len_a * len_b yields the [k, n] outer product of norms.
    return dots / (len_a * len_b)

def replay_loss(args, cls, prev_cls, hidden, prev_hidden, labels, prototypes=None,
                ):
    """Cross-entropy plus knowledge-distillation loss for memory replay.

    The current classifier is trained with standard CE on ``hidden`` and is
    additionally distilled toward the frozen previous-task classifier over
    the old relation classes. With ``prototypes`` given, the distillation
    targets are re-weighted by prototype similarity and each sample is scaled
    by a focal weight.

    Args:
        args: hyper-parameters (``kd_temp``, ``gamma``, ``kd_lambda1``).
        cls: current classifier; called as ``cls(hidden, labels)`` and
            expected to return ``(ce_loss, logits)``.
        prev_cls: frozen previous classifier; assumed to return a 1-tuple
            ``(logits,)`` when called without labels (hence the
            trailing-comma unpack below) — TODO confirm against its definition.
        hidden / prev_hidden: encoder outputs of the current / previous model.
        labels: gold class indices, shape [batch].
        prototypes: optional class prototype matrix; rows 0..prev_rel_num-1
            are presumably the previous tasks' classes — verify against caller.

    Returns:
        Scalar loss ``ce + args.kd_lambda1 * kd``.
    """
    # cross entropy
    celoss, logits = cls(hidden, labels)
    with torch.no_grad():
        prev_logits, = prev_cls(prev_hidden)
    logits, prev_logits = logits.view(-1, logits.shape[-1]), prev_logits.view(-1, prev_logits.shape[-1])

    if prototypes is None:
        # Plain KL distillation restricted to the old classes: the previous
        # classifier only has ``index`` output units.
        index = prev_logits.shape[1]
        source = F.log_softmax(logits[:, :index], dim=1)
        target = F.softmax(prev_logits, dim=1) + 1e-8
        # KL divergence on the linear (softmax) outputs.
        # NOTE(review): F.kl_div's default reduction ('mean') averages over
        # all elements, not per sample ('batchmean') — confirm the scale is
        # intended.
        kdloss = F.kl_div(source, target)
    else:
        # focal knowledge distillation
        with torch.no_grad():
            # Cosine similarity of each sample to every class prototype.
            sim = compute_cos_sim(hidden, prototypes)
            prev_sim = sim[:, :prev_logits.shape[1]]  # [batch_size, prev_rel_num]
            # Temperature-scaled distribution over the previous model's relations.
            prev_sim = F.softmax(prev_sim / args.kd_temp, dim=1)

            prob = F.softmax(logits, dim=1)
            # One minus the probability assigned to the true (positive) class:
            # well-classified samples receive a small distillation weight.
            focal_weight = 1.0 - torch.gather(prob, dim=1, index=labels.unsqueeze(1)).squeeze()
            focal_weight = focal_weight ** args.gamma

        # Logits restricted to the previous tasks' relations.
        source = logits.narrow(1, 0, prev_logits.shape[1])
        source = F.log_softmax(source, dim=1)
        target = F.softmax(prev_logits, dim=1)
        # For old relations: the previous model's soft targets re-weighted by
        # the current model's prototype-similarity probabilities.
        target = target * prev_sim + 1e-8
        kdloss = torch.sum(-source * target, dim=1)
        kdloss = torch.mean(kdloss * focal_weight)

    rep_loss = celoss + args.kd_lambda1 * kdloss
    return rep_loss