import argparse
import logging
import os
import time
from datetime import datetime

import nni
import numpy as np
import torch
import torch.nn.functional as F
from datasets import load_metric
from nni.utils import merge_parameter
from torch import nn
from torch.nn import CrossEntropyLoss, KLDivLoss
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from transformers import BertTokenizer, BertConfig, AdamW

from dataloading import load_and_cache_examples
from fiscore import get_f1
from metric import compute_metrics
from model import BertEncoder, Classifier
from tsne import tsne_visualization
from utils import get_labels_dy

torch.set_float32_matmul_precision('high')

import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

def train_current_task(
        args,
        encoder,
        prev_encoder,
        classifier,
        prev_classifier,
        train_dataset,
        prev_dev_dataset,
        dev_dataset,
        test_dataset,
        labels_list,
        output_dir,
        task_id
):
    """Train the encoder/classifier pair on the current incremental task.

    When a previous model exists (``task_id > 0``), a preliminary no-grad
    pass caches the old model's class probabilities and features for every
    training example; these drive the knowledge-distillation / contrastive
    terms added to the cross-entropy loss. Each epoch evaluates on dev,
    prev-dev and test, checkpoints on improved test micro-F1 to
    ``output_dir`` and early-stops after ``args.early_stop`` stagnant epochs.

    Returns:
        The encoder module (see NOTE at the checkpointing step below).
    """
    # NOTE(review): anomaly detection slows training considerably; presumably
    # left enabled for debugging -- confirm it should stay on.
    torch.autograd.set_detect_anomaly(True)

    with open(args.report_file, "a") as f:
        f.write("==================================\n")
        f.write("kd_percent: {}\n".format(args.kd_percent))


    # shuffle=False keeps the cached old-model outputs below aligned with the
    # dataset order; a shuffled loader is rebuilt once they are attached.
    dataloader = DataLoader(train_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            drop_last=False)

    if task_id > 0 and prev_encoder is not None:
        # Cache the previous model's softmax outputs (index 4) and features
        # (index 5) per example for the distillation losses used below.
        print("====================Getting Old Probability====================")
        bar = tqdm(dataloader, desc="Old Probability")
        prev_linear_output = []
        prev_features = []
        all_input_ids, all_input_mask, all_segment_ids, all_label_ids = [], [], [], []
        for step, batch in enumerate(bar):
            for i in range(len(batch)):
                batch[i] = batch[i].to(args.device)
            with torch.no_grad():

                prev_hidden, prev_feature = prev_encoder.forward(
                    input_ids=batch[0],
                    attention_mask=batch[1])

                prev_output = prev_classifier.forward(
                    hidden=prev_hidden,
                    labels=None)[0]
                prev_output_prob = F.softmax(prev_output, dim=-1)
                prev_linear_output.append(prev_output_prob)
                prev_features.append(prev_feature)
                all_label_ids.append(batch[3])
                all_input_ids.append(batch[0])
                all_input_mask.append(batch[1])
                all_segment_ids.append(batch[2])

        torch.cuda.empty_cache()
        all_input_ids = torch.cat(all_input_ids, dim=0)
        all_input_mask = torch.cat(all_input_mask, dim=0)
        all_segment_ids = torch.cat(all_segment_ids, dim=0)
        all_label_ids = torch.cat(all_label_ids, dim=0)
        all_linear_output = torch.cat(prev_linear_output, dim=0)
        all_prev_features = torch.cat(prev_features, dim=0)
        train_dataset_with_old_prob = TensorDataset(
            all_input_ids,
            all_input_mask,
            all_segment_ids,
            all_label_ids,
            all_linear_output,
            all_prev_features)

        dataloader = DataLoader(
            train_dataset_with_old_prob,
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=False)



    proto_features = None

    # Separate learning rates for the encoder and the classifier head.
    optimizer = AdamW(
        [
            {"params": encoder.parameters(), "lr": args.encoder_lr},
            {"params": classifier.parameters(), "lr": args.classifier_lr},
        ],
        eps=args.adam_eps,
        no_deprecation_warning=True
    )
    loss_func = KLDivLoss(reduction="batchmean")
    LogSoftmax = nn.LogSoftmax(dim=-1)

    # From the second task on, seed the new classifier head with the previous
    # task's weights before training, then log a prev-dev baseline.
    if len(labels_list) - args.per_types > 1:
        classifier.weight_copy(
            prev_classifier=prev_classifier)
        print("=================weight copy=================")
        print("-----------prev_dev evaluate------------")
        # Logged for reference only; the returned metrics are unused here.
        macro_f1, micro_f1, preds_list, preds, out_label_ids, results = evaluate(args, encoder, classifier,
                                                                        prev_dev_dataset)



    best_f1 = 0
    best_f1_mar = 0
    best_epoch = -1
    loss_num = 0
    loss_total = 0
    # Placeholders so progress-bar logging works before these terms are set.
    loss_kd = torch.tensor([0])
    scloss = torch.tensor([0])
    marginloss = torch.tensor(0)
    fkdloss = torch.tensor(0)
    best_encoder = encoder
    best_classifier = classifier
    best_results = None
    no_increace_num = 0
    for epoch in range(args.current_epoch):
        print("================Model Training Current: epoch {}================".format(epoch))

        encoder.train()
        classifier.train()
        bar = tqdm(dataloader, desc="Current Epoch {}".format(epoch))
        # all_input_ids, all_input_mask, all_segment_ids, all_label_ids
        for step, batch in enumerate(bar):
            for i in range(len(batch)):
                batch[i] = batch[i].to(args.device)

            # The encoder's output has already been normalized.
            hidden, feature = encoder.forward(
                            input_ids=batch[0],
                            attention_mask=batch[1])
            if args.use_con_current:
                if args.use_scloss:
                    scloss = contrastive_ent(args, batch[3], feature)


            # print(batch[3].min(), batch[3].max())
            ce_loss, current_output = classifier.forward(
                hidden=hidden,
                labels=batch[3])
            labels_flatten = batch[3].view(-1)

            # Distill only at positions whose gold label belongs to an old
            # task (index below the new-label range) and is not padding.
            condition = (labels_flatten < len(labels_list) - args.per_types) & (labels_flatten != -100)

            idx = torch.where(condition)[0]

            current_output = current_output.view(-1, current_output.shape[-1])[idx]
            # Log-probs restricted to the old-task slice of the logits.
            current_output_prob = LogSoftmax(current_output[:, :current_output.shape[-1] - args.per_types])
            if prev_encoder is not None:
                if args.use_kd:
                    prev_output_prob = batch[4]
                    prev_output_prob = prev_output_prob.view(-1, prev_output_prob.shape[-1])[idx]
                    loss_kd = loss_func(
                        current_output_prob,
                        prev_output_prob)
                    # KD weight grows with the number of completed tasks.
                    loss = ce_loss + args.kd_percent * loss_kd * task_id

                else:
                    loss = ce_loss

            else:
                loss = ce_loss

            if prev_encoder is not None:
                if args.use_con_article and epoch >= args.con_start_epoch:
                    # loss_con, (sc_loss, marginloss, fkdloss) = contrastive_loss(args=args,
                    #                                                             feature=feature,
                    #                                                             labels=batch[3],
                    #                                                             proto_features=proto_features,
                    #                                                             prev_feature=batch[5])
                    # loss_con, (sc_loss, marginloss, fkdloss) = contrastive_ent_with_distill(
                    #     args=args,
                    #     labels=batch[3],
                    #     feature=feature,
                    #     prev_feature=batch[5])
                    if args.use_con_distill:
                        fkdloss = con_distill(
                            args=args,
                            labels=batch[3],
                            feature=feature,
                            prev_feature=batch[5]
                        )
                    elif args.use_con_distill_entropy:
                        fkdloss = con_distill_entropy(
                            args=args,
                            labels=batch[3],
                            feature=feature,
                            prev_feature=batch[5]
                        )

                    if args.use_scloss:
                        loss_con = scloss + args.kd_lambda2 * fkdloss * task_id + marginloss
                    else:
                        loss_con = args.kd_lambda2 * fkdloss * task_id + marginloss
                    loss = loss + loss_con
            else:
                if args.use_scloss:
                    loss = loss + scloss
                else:
                    loss = loss

            loss_num += 1



            # NOTE(review): loss_total accumulates loss *tensors*; consider
            # accumulating loss.item() instead so no autograd graph metadata
            # is retained across steps.
            if args.use_con_current:
                if args.use_con_article:
                    if epoch >= args.con_start_epoch:
                        loss_total += loss
                        bar.set_description(
                            "mean: {:.4f}, loss: {:.4f}, ce: {:.4f}, kd: {:.4f}, sc: {:.4f}, mar:{:.4f}, fkd:{:.4f}".
                            format(loss_total.item() / loss_num, loss.item(), ce_loss.item(), loss_kd.item(),
                                   scloss.item(), marginloss.item(), fkdloss.item()))
                    elif epoch < args.con_start_epoch:
                        loss = scloss + loss
                        loss_total += loss
                        bar.set_description("mean_loss: {:.4f}, loss: {:.4f}, ce_loss: {:.4f}, kd_loss: {:.4f}, scloss: {:.4f}".
                                            format(loss_total.item() / loss_num, loss.item(), ce_loss.item(), loss_kd.item(),
                                                   scloss.item()))
                else:
                    loss = scloss + loss
                    loss_total += loss
                    bar.set_description(
                        "mean_loss: {:.4f}, loss: {:.4f}, ce_loss: {:.4f}, kd_loss: {:.4f}, scloss: {:.4f}".
                        format(loss_total.item() / loss_num, loss.item(), ce_loss.item(), loss_kd.item(),
                               scloss.item()))
            else:
                loss_total += loss
                bar.set_description("mean_loss: {:.4f}, loss: {:.4f}, ce_loss: {:.4f}, kd_loss: {:.4f}".
                                format(loss_total.item()/loss_num, loss.item(), ce_loss.item(), loss_kd.item()))
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()




        # print("-----------train evaluate------------")
        # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, encoder, classifier, train_dataset, proto_features=proto_features)
        # with open(args.report_file, "a") as f:
        #     f.write("now_train: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(epoch, macro_f1["f1"],
        #                                                                               micro_f1["f1"]))

        if args.using_memory:
            # NOTE(review): `data_dir` is not defined anywhere in this
            # function -- this call raises NameError when args.using_memory
            # is set; it presumably should be args.data_dir or a parameter.
            proto_hidden, proto_features = get_prototypes(
                args=args,
                model=encoder,
                labels_list=labels_list,
                train_set=train_dataset,
                data_dir=data_dir,
            )

        print("-----------dev evaluate------------")

        macro_f1, micro_f1, preds_list, preds, out_label_ids, results = evaluate(
            args, encoder, classifier, dev_dataset, proto_features=proto_features)
        with open(args.report_file, "a") as f:
            f.write("now_dev: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(epoch, macro_f1["f1"], micro_f1["f1"]))

        if prev_encoder is not None:
            print("-----------prev_dev evaluate------------")
            macro_f1, micro_f1, preds_list, preds, out_label_ids, results = evaluate(
                args, encoder, classifier, prev_dev_dataset, proto_features=proto_features)
            with open(args.report_file, "a") as f:
                f.write("pre_test: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(epoch, macro_f1["f1"],
                                                                                          micro_f1["f1"]))

            # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, prev_encoder, prev_classifier,
            #                                                                 prev_dev_dataset)
            # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, prev_encoder, classifier,
            #                                                                 prev_dev_dataset)
            # macro_f1, micro_f1, preds_list, preds, out_label_ids = evaluate(args, encoder, prev_classifier,
            #                                                                 prev_dev_dataset)


        # Model selection uses *test* micro-F1 (not dev) -- intentional?
        print("-----------test evaluate------------")
        macro_f1, micro_f1, preds_list, preds, out_label_ids, results = evaluate(
            args, encoder, classifier, test_dataset, proto_features=proto_features)
        with open(args.report_file, "a") as f:
            f.write("test: epoch: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(epoch, macro_f1["f1"], micro_f1["f1"]))
            f.write("\n")
        if micro_f1["f1"] >= best_f1:
            # NOTE(review): these assignments alias the live modules, which
            # keep training in later epochs -- the true best weights are only
            # preserved in the checkpoint files saved below. Confirm intended.
            best_encoder = encoder
            best_classifier = classifier
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            torch.save(encoder.state_dict(), output_dir + "/encoder.pt")
            print("===============save encoder.pt==================")
            torch.save(classifier.state_dict(), output_dir + "/classifier.pt")
            print("==============save classifier.pt================")
            best_f1 = micro_f1["f1"]
            best_f1_mar = macro_f1["f1"]
            best_epoch = epoch
            best_results = results
            print("best_f1 = ", best_f1)
            no_increace_num = 0
        else:
            no_increace_num += 1
        if no_increace_num >= args.early_stop:
            print("===============early stop==================")
            break
    with open(args.report_file, "a") as f:
        f.write("task: {} best_micro_f1: {:.4f}\n".format(task_id, best_f1))
        f.write(str(best_results))
        f.write("------------------------------------------\n")

    with open(args.best_file, "a") as f:
        f.write("task: {}, best_epoch: {}, kd_percent: {}, kd_lambda2: {}, best_micro_f1: {:.4f}, best_marco_f1: {:.4f}\n".
                format(task_id, best_epoch, args.kd_percent, args.kd_lambda2, best_f1, best_f1_mar))

        f.write("------------------------------------------\n")

    return best_encoder

def evaluate(
        args,
        model,
        classifier,
        valdata,
        proto_features=None,
        tsne=False
):
    """Evaluate a model/classifier pair on ``valdata`` with seqeval metrics.

    Predictions come from the classifier's softmax; when ``proto_features``
    is given they are blended with a prototype-similarity softmax (weight
    ``args.alpha``). Flat label ids are converted back to BIO tag sequences
    before scoring.

    Args:
        args: run configuration (device, batch_size, cl_temp, alpha, ...).
        model: encoder returning (hidden, feature).
        classifier: head producing logits from ``hidden``.
        valdata: TensorDataset of (input_ids, mask, segment_ids, label_ids).
        proto_features: optional prototype matrix for NCM-style scoring.
        tsne: when True, collect hidden states and draw a t-SNE plot.

    Returns:
        (macro_f1, micro_f1, preds_list, preds, out_label_ids, results).

    NOTE(review): ``labels_list`` (used below to build ``label_map``) is not
    a parameter and is not defined at module level in this file's visible
    code -- it resolves as a global at call time; verify it is bound first.
    """
    pad_label_id = CrossEntropyLoss().ignore_index
    pred_labels = []
    ground_truth = []
    model.eval()
    dataloader = DataLoader(valdata,
                            batch_size=args.batch_size,
                            drop_last=False)

    reps = np.array([])
    reps_labels = []

    bar = tqdm(dataloader, desc="Evaluating")
    for i, batch in enumerate(bar):
        # NOTE(review): this inner loop shadows the outer `i` (harmless
        # here since the outer index is never used afterwards).
        for i in range(len(batch)):
            batch[i] = batch[i].to(args.device)

        with torch.no_grad():

            hidden, feature = model.forward(
                input_ids=batch[0],
                attention_mask=batch[1])
            logits = classifier.forward(hidden)[0]
            prob_cls = F.softmax(logits, dim=-1)
            if proto_features is not None:
                # Prototype (nearest-class-mean style) probabilities blended
                # with the classifier's probabilities via args.alpha.
                feature = feature.view(-1, feature.shape[-1])
                logits = torch.mm(feature, proto_features.T) / args.cl_temp
                prob_ncm = F.softmax(logits, dim=-1)
                prob_ncm = prob_ncm.view(prob_cls.shape[0], -1, prob_ncm.shape[-1])
                prob = prob_cls * args.alpha + prob_ncm * (1 - args.alpha)
            else:
                prob = prob_cls

            if tsne:
                # Collect non-pad hidden states for the t-SNE figure.
                x = hidden.view(-1, hidden.shape[-1])
                y = batch[3].view(-1)

                x = x[y != -100]
                y = y[y != -100]

                x_np = x.detach().cpu().numpy()
                y_np = y.detach().cpu().numpy()

                if len(reps) == 0:
                    reps = x_np
                    reps_labels = y_np
                else:
                    reps = np.concatenate((reps, x_np), axis=0)
                    reps_labels = np.concatenate((reps_labels, y_np), axis=0)



        pred = torch.argmax(prob, dim=-1)
        pred_labels.append(pred)
        ground_truth.append(batch[3])
        torch.cuda.empty_cache()

    if tsne:
        tsne_visualization(reps, reps_labels)
    preds = torch.cat(pred_labels, dim=0)
    out_label_ids = torch.cat(ground_truth, dim=0)

    preds = preds.detach().cpu().numpy()
    out_label_ids = out_label_ids.detach().cpu().numpy()




    # label_map = {i: "I-" + label for i, label in enumerate(labels_list)}
    label_map = {i: label for i, label in enumerate(labels_list)}
    label_map[0] = "O"

    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]

    # Rebuild BIO tag sequences (pad positions dropped) from flat label ids:
    # a label change opens a "B-" tag, a repeated label continues with "I-".
    for i in range(out_label_ids.shape[0]):
        prev_ol = 0
        prev_p = 0
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_label_id:
                if out_label_ids[i, j] != 0:
                    if prev_ol != out_label_ids[i, j]:
                        out_label_list[i].append("B-" + label_map[out_label_ids[i][j]])
                    elif prev_ol != 0 and prev_ol == out_label_ids[i][j]:
                        out_label_list[i].append("I-" + label_map[out_label_ids[i][j]])
                    prev_ol = out_label_ids[i][j]
                else:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    prev_ol = 0

            # Predictions are filtered by the *gold* pad mask so both tag
            # sequences stay the same length.
            if out_label_ids[i, j] != pad_label_id:
                if preds[i, j] != 0:
                    if prev_p != preds[i, j]:
                        preds_list[i].append("B-" + label_map[preds[i][j]])
                    elif prev_p != 0 and prev_p == preds[i][j]:
                        preds_list[i].append("I-" + label_map[preds[i][j]])
                    prev_p = preds[i][j]
                else:
                    preds_list[i].append(label_map[preds[i][j]])
                    prev_p = 0

    # Local seqeval wrapper script (HuggingFace datasets metric).
    metric = load_metric("./Seqeval.py")

    metric.add_batch(
        predictions=preds_list,
        references=out_label_list,
    )

    macro_f1, micro_f1, results = compute_metrics(metric)

    print("macro: pre = {:.4f}, rec = {:.4f}, f1 = {:.4f}".
          format(macro_f1["precision"], macro_f1["recall"], macro_f1["f1"]))
    print("micro: pre = {:.4f}, rec = {:.4f}, f1 = {:.4f}".
          format(micro_f1["precision"], micro_f1["recall"], micro_f1["f1"]))

    return macro_f1, micro_f1, preds_list, preds, out_label_ids, results


def contrastive_ent(args, labels, feature):
    """Supervised contrastive (SupCon-style) loss over token features.

    Anchors are non-"O" tokens (label != 0) that have at least one other
    token with the same label in the batch; padded positions (-100) are
    excluded from both numerator and denominator.

    Args:
        args: namespace providing ``cl_temp`` (temperature) and
            ``ignore_pad_con`` (drop padded columns from the candidate set).
        labels: integer label tensor of any shape; flattened internally.
        feature: feature tensor, flattened to [num_tokens, dim]; assumed
            L2-normalized by the encoder -- TODO confirm with BertEncoder.

    Returns:
        Scalar loss tensor; 0.0 when no valid anchor exists.
    """
    # Hoisted once: the original constructed CrossEntropyLoss several times
    # per call just to read this constant (-100).
    ignore_index = CrossEntropyLoss().ignore_index

    feature = feature.view(-1, feature.shape[-1])
    labels = labels.view(-1)
    # Pairwise similarities scaled by temperature.
    dot_div_temp = torch.mm(feature, feature.T) / args.cl_temp
    # Subtract the per-row max (detached) for numerical stability.
    dot_div_temp_max, _ = torch.max(dot_div_temp, dim=-1, keepdim=True)
    dot_div_temp_norm = dot_div_temp - dot_div_temp_max.detach()
    exp_dot_temp = torch.exp(dot_div_temp_norm)
    labels = labels.contiguous().view(-1, 1)
    # mask[i, j] = 1 when tokens i and j share a label (positive pairs).
    mask = torch.eq(labels, labels.T).float()

    # logits_mask removes self-similarity from the denominator.
    logits_mask = torch.scatter(
        torch.ones_like(mask),
        1,
        torch.arange(len(labels)).view(-1, 1).to(device=feature.device),
        0)

    mask = mask * logits_mask
    # Padded positions contribute neither positives nor anchors.
    pad_rows = labels.view(-1) == ignore_index
    mask[pad_rows] = torch.zeros(mask.shape[1]).to(device=feature.device)
    logits_mask[pad_rows] = torch.zeros(logits_mask.shape[1]).to(device=feature.device)
    # Keep anchors with label != 0 and at least one positive pair.
    idx = torch.where(labels.view(-1) * mask.sum(dim=-1)
                      * (labels.view(-1) + 1) != 0)[0]
    if len(idx) == 0:
        return torch.tensor(0.0).to(device=feature.device)
    logits_mask = logits_mask[idx]
    exp_dot_temp = exp_dot_temp[idx]
    dot_div_temp_norm = dot_div_temp_norm[idx]
    mask = mask[idx]
    if args.ignore_pad_con:
        # Optionally restrict the candidate columns to non-pad tokens.
        idx_not_pad = torch.where(labels.view(-1) != ignore_index)[0]
        logits_mask = logits_mask[:, idx_not_pad]
        exp_dot_temp = exp_dot_temp[:, idx_not_pad]
        dot_div_temp_norm = dot_div_temp_norm[:, idx_not_pad]
        mask = mask[:, idx_not_pad]

    exp_logits = exp_dot_temp * logits_mask

    # log p(j | i) over the retained candidates, averaged over positives.
    log_prob = dot_div_temp_norm - torch.log(exp_logits.sum(1, keepdim=True))
    mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
    scloss = - mean_log_prob_pos.mean()

    return scloss

def contrastive_ent_with_distill(args, labels, feature, prev_feature):
    """Supervised contrastive loss plus similarity-matrix distillation.

    The first term is the same SupCon loss as ``contrastive_ent``; the
    second distills pairwise similarity rows of "O" tokens (label == 0)
    from the previous model's features via KL divergence.

    Args:
        args: namespace providing ``cl_temp`` and ``kd_lambda2``.
        labels: integer label tensor; flattened internally.
        feature: current-model features, flattened to [num_tokens, dim].
        prev_feature: previous-model features, same layout.

    Returns:
        (loss, (scloss, placeholder margin tensor, fkdloss)).
    """
    ignore_index = CrossEntropyLoss().ignore_index  # -100

    feature = feature.view(-1, feature.shape[-1])
    prev_feature = prev_feature.view(-1, prev_feature.shape[-1])
    labels = labels.view(-1)
    dot_div_temp = torch.mm(feature, feature.T) / args.cl_temp
    dot_div_temp_prev = torch.mm(prev_feature, prev_feature.T) / args.cl_temp
    # Max-subtraction for numerical stability before exp().
    dot_div_temp_max, _ = torch.max(dot_div_temp, dim=-1, keepdim=True)
    dot_div_temp_norm = dot_div_temp - dot_div_temp_max.detach()
    exp_dot_temp = torch.exp(dot_div_temp_norm) + 1e-8
    labels = labels.contiguous().view(-1, 1)
    mask = torch.eq(labels, labels.T).float()

    logits_mask = torch.scatter(
        torch.ones_like(mask),
        1,
        torch.arange(len(labels)).view(-1, 1).to(device=feature.device),
        0)
    mask = mask * logits_mask
    pad_rows = labels.view(-1) == ignore_index
    mask[pad_rows] = torch.zeros(mask.shape[1]).to(device=feature.device)
    logits_mask[pad_rows] = torch.zeros(logits_mask.shape[1]).to(device=feature.device)
    idx = torch.where(labels.view(-1) * mask.sum(dim=-1)
                      * (labels.view(-1) + 1) != 0)[0]
    # BUGFIX: guard added for consistency with contrastive_ent -- without it
    # the mean over an empty anchor set yields NaN and poisons the loss.
    if len(idx) == 0:
        scloss = torch.tensor(0.0).to(device=feature.device)
    else:
        exp_logits = (exp_dot_temp * logits_mask)[idx]
        log_prob = dot_div_temp_norm[idx] - torch.log(exp_logits.sum(1, keepdim=True))
        mean_log_prob_pos = (mask[idx] * log_prob).sum(1) / mask[idx].sum(1)
        scloss = - mean_log_prob_pos.mean()

    # Distill pairwise similarity rows of "O" tokens from the old model.
    idx_o = torch.where(labels.view(-1) == 0)[0]
    # BUGFIX: KLDivLoss(batchmean) over zero rows divides by zero -> NaN.
    if len(idx_o) == 0:
        fkdloss = torch.tensor(0.0).to(device=feature.device)
    else:
        student = F.log_softmax(dot_div_temp[idx_o], dim=-1)
        teacher = F.softmax(dot_div_temp_prev[idx_o], dim=-1)
        fkdloss = KLDivLoss(reduction='batchmean')(student, teacher)

    loss = scloss + args.kd_lambda2 * fkdloss

    return loss, (scloss, torch.tensor(0), fkdloss)

def con_distill(args, labels, feature, prev_feature):
    """KL-distill pairwise-similarity rows of "O" tokens from the old model.

    Args:
        args: namespace providing ``cl_temp`` and ``ignore_pad_con``.
        labels: integer label tensor; "O" tokens are label == 0.
        feature: current-model features, flattened to [num_tokens, dim].
        prev_feature: previous-model features, same layout.

    Returns:
        Scalar KL loss tensor; 0.0 when the batch contains no "O" token.
    """
    feature = feature.view(-1, feature.shape[-1])
    prev_feature = prev_feature.view(-1, prev_feature.shape[-1])
    labels = labels.view(-1)
    dot_div_temp = torch.mm(feature, feature.T) / args.cl_temp
    dot_div_temp_prev = torch.mm(prev_feature, prev_feature.T) / args.cl_temp
    idx = torch.where(labels == 0)[0]
    # BUGFIX: with no "O" tokens, KLDivLoss(batchmean) divides by a zero
    # batch size and returns NaN; return a zero loss instead.
    if len(idx) == 0:
        return torch.tensor(0.0).to(device=feature.device)
    student_sim = dot_div_temp[idx]
    teacher_sim = dot_div_temp_prev[idx]

    if args.ignore_pad_con:
        # Drop padded columns from the distilled similarity distributions.
        idx_not_pad = torch.where(labels != CrossEntropyLoss().ignore_index)[0]
        student_sim = student_sim[:, idx_not_pad]
        teacher_sim = teacher_sim[:, idx_not_pad]
    kl_function = KLDivLoss(reduction='batchmean')
    student = F.log_softmax(student_sim, dim=-1)
    teacher = F.softmax(teacher_sim, dim=-1)
    fkdloss = kl_function(student, teacher)
    return fkdloss

def con_distill_entropy(args, labels, feature, prev_feature):
    """Cross-entropy variant of ``con_distill`` over "O"-token similarities.

    Matches the current model's similarity distribution to the previous
    model's via soft cross-entropy instead of KL divergence.

    Args:
        args: namespace providing ``cl_temp`` and ``ignore_pad_con``.
        labels: integer label tensor; "O" tokens are label == 0.
        feature: current-model features, flattened to [num_tokens, dim].
        prev_feature: previous-model features, same layout.

    Returns:
        Scalar loss tensor; 0.0 when the batch contains no "O" token.
    """
    feature = feature.view(-1, feature.shape[-1])
    prev_feature = prev_feature.view(-1, prev_feature.shape[-1])
    labels = labels.view(-1)
    dot_div_temp = torch.mm(feature, feature.T) / args.cl_temp
    dot_div_temp_prev = torch.mm(prev_feature, prev_feature.T) / args.cl_temp
    idx = torch.where(labels == 0)[0]
    # BUGFIX: torch.mean over an empty selection returns NaN when the batch
    # has no "O" tokens; return a zero loss instead.
    if len(idx) == 0:
        return torch.tensor(0.0).to(device=feature.device)
    student_sim = dot_div_temp[idx]
    teacher_sim = dot_div_temp_prev[idx]

    if args.ignore_pad_con:
        # Drop padded columns from the distilled similarity distributions.
        idx_not_pad = torch.where(labels != CrossEntropyLoss().ignore_index)[0]
        student_sim = student_sim[:, idx_not_pad]
        teacher_sim = teacher_sim[:, idx_not_pad]

    # Soft cross-entropy: -sum(teacher * log student), averaged over rows.
    target = F.softmax(teacher_sim, dim=1)
    source = F.log_softmax(student_sim, dim=1)
    fkdloss = torch.sum(-source * target, dim=1)
    fkdloss = torch.mean(fkdloss)

    return fkdloss





def contrastive_loss(args,
                     feature,
                     labels,
                     proto_features,
                     prev_feature):

    feature = feature.view(-1, feature.shape[-1])

    labels = labels.view(-1)

    idx = torch.where(labels != CrossEntropyLoss().ignore_index)[0]
    feature = feature[idx]
    if prev_feature is not None:
        prev_feature = prev_feature.view(-1, prev_feature.shape[-1])
        prev_feature = prev_feature[idx]
    labels = labels[idx]

    dot_div_temp = torch.mm(feature, proto_features.T) / args.cl_temp  # [batch_size, rel_num]
    dot_div_temp_norm = dot_div_temp - 1.0 / args.cl_temp
    exp_dot_temp = torch.exp(dot_div_temp_norm) + 1e-8  # avoid log(0)


    mask = torch.zeros_like(exp_dot_temp).to(args.device)
    mask.scatter_(1, labels.unsqueeze(1), 1.0)
    cardinalities = torch.sum(mask, dim=1)


    log_prob = - torch.log(exp_dot_temp / torch.sum(exp_dot_temp, dim=1, keepdim=True))
    scloss_per_sample = torch.sum(log_prob * mask, dim=1) / cardinalities
    # 当前任务关系的对比损失
    scloss = torch.mean(scloss_per_sample)

    if prev_feature is not None:
        with torch.no_grad():
            prev_proto_features = proto_features[:proto_features.shape[1]-args.per_types]
            prev_sim = F.softmax(torch.mm(feature, prev_proto_features.T) / args.cl_temp / args.kd_temp, dim=1)
            # 新模型中旧的关系的概率
            prob = F.softmax(torch.mm(feature, proto_features.T) / args.cl_temp / args.kd_temp, dim=1)
            focal_weight = 1.0 - torch.gather(prob, dim=1, index=labels.unsqueeze(1)).squeeze()
            focal_weight = focal_weight ** args.gamma

            target = F.softmax(torch.mm(prev_feature, prev_proto_features.T) / args.cl_temp, dim=1) # [batch_size, prev_rel_num]

        source = F.log_softmax(torch.mm(feature, prev_proto_features.T) / args.cl_temp, dim=1) # [batch_size, prev_rel_num]
        target = target * prev_sim + 1e-8
        fkdloss = torch.sum(-source * target, dim=1)
        fkdloss = torch.mean(fkdloss * focal_weight)

    else:
        fkdloss = torch.tensor(0.).to(args.device)

        # 三元组损失
    if proto_features is not None:
        with torch.no_grad():
            sim = torch.mm(feature, proto_features.T)
            neg_sim = torch.scatter(sim, 1, labels.unsqueeze(1), -10.0)
            neg_indices = torch.argmax(neg_sim, dim=1)

        pos_proto = proto_features[labels]
        neg_proto = proto_features[neg_indices]

        positive = torch.sum(feature * pos_proto, dim=1)
        negative = torch.sum(feature * neg_proto, dim=1)

        marginloss = torch.maximum(args.margin - positive + negative, torch.zeros_like(positive).to(args.device))
        marginloss = torch.mean(marginloss)
    else:
        marginloss = 0.0

    loss = scloss + args.cl_lambda * marginloss + args.kd_lambda2 * fkdloss
    return loss, (scloss, marginloss, fkdloss)



def train(
        model_path,
        labels_list,
        pad_token_id,
        output_dir,
        data_dir,
        prev_data_dir,
        step_id,
        logger,
        ):
    """Run one incremental-learning step.

    Loads the datasets (and, when ``step_id > 0``, the previous task's
    checkpoint), optionally relabels the new data with the old model and
    mixes in replay memory, then trains via ``train_current_task``.

    NOTE(review): relies on module-level globals ``args`` and
    ``pad_label_id`` (neither is a parameter); ``pad_token_id`` is accepted
    but never used here -- confirm.

    Returns:
        (current_encoder, current_classifier) -- the trailing comma in the
        return statement still yields a plain 2-tuple.
    """
    print("=======================step: {}=======================".format(step_id))



    prev_encoder = None
    prev_classifier = None


    # NOTE(review): config.num_labels is set but `config` is never passed to
    # BertEncoder/Classifier below -- it appears unused.
    config = BertConfig.from_pretrained(args.base_model_path)
    config.num_labels = len(labels_list)
    tokenizer = BertTokenizer.from_pretrained(args.base_model_path)

    prev_dev_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_label_id,
        data_dir=prev_data_dir,
        mode="test",
        logger=logger)



    model = BertEncoder(args, tokenizer)
    classifier = Classifier(args, args.hidden_dim, len(labels_list))

    # torch.compile only on specific server/GPU combinations.
    if args.usept2:
        if args.server == "108" or (args.server == "107" and args.device == "cuda:0"):
            print("use pytorch2.0")
            classifier = torch.compile(classifier)
            model = torch.compile(model)

    model.to(args.device)
    classifier.to(args.device)



    if step_id != 0:
        # Optionally restart from a fixed task-0 checkpoint instead of the
        # immediately preceding task's output (model_path restored at the end).
        if args.no_start_with_bert_base and args.start_task == step_id:
            mp = model_path
            if args.server == "108":
                model_path = "/data/livosr/pythonproject/cl_ner/results/2023-10-11 09:45:36/task_0"
            elif args.server == "107":
                model_path = "/home/livosr/pythonproject/cl_ner/conll03_results/2023-10-13 13:54:36/task_0"
            with open(args.best_file, "a") as f:
                f.write("base_model_path: {}\n".format(model_path))
        print("================loading prev model================")
        print("loading encoder ... from ", model_path + "/encoder.pt")
        model_state_dict = torch.load(model_path + "/encoder.pt")
        prev_classifier = Classifier(args, args.hidden_dim, len(labels_list) - args.per_types)
        print("loading classifier ... from ", model_path + "/classifier.pt")
        classifier_state_dict = torch.load(model_path + "/classifier.pt")
        # Reconcile torch.compile's "_orig_mod." key prefix in either
        # direction so checkpoints load across compiled/uncompiled runs.
        if args.usept2 and (args.server == "108" or (args.server == "107" and args.device == "cuda:0")):
            prev_classifier = torch.compile(prev_classifier)
            if not "_orig_mod." in list(model_state_dict.keys())[0]:
                model_state_dict = {"_orig_mod." + k: v for k, v in model_state_dict.items()}
                classifier_state_dict = {"_orig_mod." + k: v for k, v in classifier_state_dict.items()}
        else:
            if "_orig_mod." in list(model_state_dict.keys())[0]:
                model_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in model_state_dict.items()}
                classifier_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in classifier_state_dict.items()}
        # NOTE(review): the current `model` is initialized from the previous
        # checkpoint and the very same object is kept as prev_encoder -- it
        # is NOT a frozen copy; the old probabilities are cached before
        # training starts, but confirm this sharing is intended.
        model.load_state_dict(model_state_dict)
        prev_encoder = model
        prev_classifier.load_state_dict(classifier_state_dict)
        prev_classifier.to(args.device)
        if args.no_start_with_bert_base and args.start_task == step_id:
            model_path = mp

    train_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_label_id,
        data_dir=data_dir,
        mode="train",
        logger=logger)

    if args.using_memory == True:
        print("====================Using memory======================")
        # Replay exemplars: entity memory plus "O" memory, concatenated.
        memory_dataset = load_and_cache_examples(
            args=args,
            tokenizer=tokenizer,
            labels=labels_list,
            pad_id=pad_label_id,
            data_dir=data_dir,
            mode="memory",
            logger=logger)

        memory_o_dataset = load_and_cache_examples(
            args=args,
            tokenizer=tokenizer,
            labels=labels_list,
            pad_id=pad_label_id,
            data_dir=data_dir,
            mode="memory_o",
            logger=logger)

        tensors2 = memory_dataset.tensors
        tensors3 = memory_o_dataset.tensors
        merged_tensors = tuple(torch.cat([t1, t2], dim=0) for t1, t2 in zip(tensors2, tensors3))
        merged_dataset = TensorDataset(*merged_tensors)

    if step_id > 0 and args.relabel:

        # Relabel: run the previous model over the new training data and
        # splice its predicted old-entity spans into the gold labels wherever
        # they do not overlap an existing gold span.
        macro_f1, micro_f1, preds_list, preds, out_label_ids, results = evaluate(
            args=args,
            model=prev_encoder,
            classifier=prev_classifier,
            valdata=train_dataset,
            proto_features=None,
            tsne=False
        )

        for i in range(len(preds)):
            current_label = train_dataset.__getitem__(i)[3]
            re_label = torch.tensor(preds[i])
            idx = torch.where(current_label != CrossEntropyLoss().ignore_index)[0]
            current_label_no_pad = current_label[idx]
            re_label_no_pad = re_label[idx]
            pre_cur, pre_re = 0, 0
            cur_spans, re_spans = [], []
            # NOTE(review): two quirks in this span scan -- (1) a span that
            # runs to the final token is never closed/appended, and (2) on a
            # direct transition between two different entity labels the new
            # span's start index is not reset. Confirm whether intended.
            for j in range(len(current_label_no_pad)):
                if current_label_no_pad[j] != 0:
                    if pre_cur == 0:
                        start = j
                    elif pre_cur != 0 and pre_cur != current_label_no_pad[j]:
                        end = j
                        cur_spans.append((start, end))
                    pre_cur = current_label_no_pad[j]
                else:
                    if pre_cur != 0:
                        end = j
                        cur_spans.append((start, end))
                    pre_cur = 0

                if re_label_no_pad[j] != 0:
                    if pre_re == 0:
                        start_ = j
                    elif pre_re != 0 and pre_re != re_label_no_pad[j]:
                        end_ = j
                        re_spans.append((start_, end_))
                    pre_re = re_label_no_pad[j]
                else:
                    if pre_re != 0:
                        end_ = j
                        re_spans.append((start_, end_))
                    pre_re = 0
            if len(re_spans) != 0:
                for span in re_spans:
                    replace = True
                    # Only splice predicted spans that do not overlap any
                    # gold span.
                    for cur_span in cur_spans:
                        if not (span[0] > cur_span[1] or span[1] < cur_span[0]):
                            replace = False
                            break
                    if replace:
                        current_label_no_pad[span[0]: span[1]] = re_label_no_pad[span[0]: span[1]]
                current_label[idx] = current_label_no_pad
                train_dataset.tensors[3][i] = current_label

    if args.using_memory == True:
        print("====================dataset concat======================")
        tensors1 = train_dataset.tensors
        tensors2 = merged_dataset.tensors
        merged_tensors = tuple(torch.cat([t1, t2], dim=0) for t1, t2 in zip(tensors1, tensors2))
        train_dataset = TensorDataset(*merged_tensors)




    dev_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_label_id,
        data_dir=data_dir,
        mode="dev",
        logger=logger)

    test_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_label_id,
        data_dir=data_dir,
        mode="test",
        logger=logger)

    current_encoder = model
    current_classifier = classifier


    current_encoder = train_current_task(
        args=args,
        encoder=current_encoder,
        prev_encoder=prev_encoder,
        classifier=current_classifier,
        prev_classifier=prev_classifier,
        train_dataset=train_dataset,
        prev_dev_dataset=prev_dev_dataset,
        dev_dataset=dev_dataset,
        test_dataset=test_dataset,
        labels_list=labels_list,
        output_dir=output_dir,
        task_id=step_id
     )

    # current_encoder.load_state_dict(torch.load(os.path.join(output_dir, "encoder.pt")))
    # current_classifier.load_state_dict(torch.load(os.path.join(output_dir, "classifier.pt")))
    torch.save(args, os.path.join(output_dir, "training_args.bin"))



    return current_encoder, \
           current_classifier, \


def test(args, step_id, data_dir, labels_list):
    """Evaluate the saved checkpoint of task ``step_id`` on its test split,
    sweeping the linear/prototype interpolation weight ``alpha``.

    Loads the encoder/classifier state dicts from
    ``{args.output_dir}/task_{step_id}``, builds prototypes from the memory
    data, then evaluates with alpha in [0, 1] (step 0.01), appending every
    result and the best one to ``alpha.txt``.

    Args:
        args: parsed command-line namespace; mutated (``args.alpha``).
        step_id: index of the task whose checkpoint is evaluated.
        data_dir: unused — immediately overwritten by ``args.data_dir``
            (kept for caller compatibility).
        labels_list: all labels seen up to and including this task.

    Returns:
        The micro-F1 dict of the last evaluated alpha (1.0).
    """
    print("=======================test for step: {}=======================".format(step_id))
    model_path = args.output_dir + "/task_{}".format(step_id)
    data_dir = args.data_dir

    tokenizer = BertTokenizer.from_pretrained(args.base_model_path)

    dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=-100,
        data_dir=data_dir,
        mode="test",
        logger=logger)

    model = BertEncoder(args, tokenizer)
    classifier = Classifier(args, args.hidden_dim, len(labels_list))

    # torch.compile wraps the module and prefixes every state-dict key with
    # "_orig_mod.". Compile (or not) BEFORE loading, then normalize the
    # checkpoint keys to match.
    # Fix: the original compiled into an unused ``prev_classifier`` variable
    # and then loaded "_orig_mod."-prefixed keys into the *uncompiled*
    # classifier, which cannot match.
    use_compile = args.usept2 and (
        args.server == "108" or (args.server == "107" and args.device == "cuda:0"))
    if use_compile:
        print("use pytorch2.0")
        model = torch.compile(model)
        classifier = torch.compile(classifier)

    model.to(args.device)
    classifier.to(args.device)

    print("================loading prev model================")
    print("loading encoder ... from ", model_path + "/encoder.pt")
    model_state_dict = torch.load(model_path + "/encoder.pt")
    print("loading classifier ... from ", model_path + "/classifier.pt")
    classifier_state_dict = torch.load(model_path + "/classifier.pt")
    if use_compile:
        # Compiled modules expect "_orig_mod."-prefixed keys.
        if not "_orig_mod." in list(model_state_dict.keys())[0]:
            model_state_dict = {"_orig_mod." + k: v for k, v in model_state_dict.items()}
            classifier_state_dict = {"_orig_mod." + k: v for k, v in classifier_state_dict.items()}
    else:
        # Plain modules need the compile-time prefix stripped.
        if "_orig_mod." in list(model_state_dict.keys())[0]:
            model_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in model_state_dict.items()}
            classifier_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in classifier_state_dict.items()}
    model.load_state_dict(model_state_dict)
    encoder = model
    classifier.load_state_dict(classifier_state_dict)

    proto_types, proto_features = get_prototypes(
        args,
        model,
        dataset,
        labels_list,
        data_dir=None,
    )

    best_f1 = 0
    best_alpha = 0
    # Iterate over an integer grid instead of accumulating 0.01 floats: the
    # original ``while i <= 1: i += 0.01`` overshoots 1.0 due to float
    # rounding, so alpha == 1.0 was never actually evaluated.
    for k in range(101):
        alpha = k / 100
        args.alpha = alpha
        macro_f1, micro_f1, preds_list, preds, out_label_ids, results = evaluate(
            args, encoder, classifier, dataset, proto_features=proto_features)
        with open("alpha.txt", "a") as f:
            f.write("alpha: {}, macro_f1: {}, micro_f1: {}\n".format(alpha, macro_f1, micro_f1))
        if micro_f1["f1"] > best_f1:
            best_f1 = micro_f1["f1"]
            best_alpha = alpha

    with open("alpha.txt", "a") as f:
        f.write("best alpha: {}, best f1: {}\n".format(best_alpha, best_f1))
    # get_f1(labels_list, preds, out_label_ids)
    return micro_f1



def get_prototypes(
        args,
        model,
        train_set,
        labels_list,
        data_dir=None,
):
    """Build per-label prototype vectors from the memory splits.

    Loads the "memory" (entity tokens) and "memory_o" ("O" tokens) splits,
    encodes them with ``model`` in eval mode, and averages the token
    encodings per label (see ``get_exemplar_means``).

    Args:
        args: parsed command-line namespace (``batch_size``, ``data_dir``, ...).
        model: encoder whose forward returns ``(encodings, features)``.
        train_set: unused — kept for caller compatibility.
        labels_list: labels for which prototypes are computed.
        data_dir: data directory; falls back to ``args.data_dir``.

    Returns:
        ``(proto_types, proto_features)``: prototype encodings and their
        low-dimensional projections.

    NOTE(review): relies on the module-level ``tokenizer`` and ``logger``
    globals defined in ``__main__``. The "train" split is loaded below but
    ``train_loader`` is never consumed downstream — presumably only its
    feature-caching side effect matters; confirm before removing.
    """
    data_dir = args.data_dir if data_dir is None else data_dir
    ignore_id = CrossEntropyLoss().ignore_index  # -100, used as the pad label
    model.eval()

    def _loader(mode):
        # All three splits share identical loading / batching settings.
        dataset = load_and_cache_examples(args=args,
                                          tokenizer=tokenizer,
                                          labels=labels_list,
                                          pad_id=ignore_id,
                                          data_dir=data_dir,
                                          mode=mode,
                                          logger=logger)
        return DataLoader(dataset,
                          batch_size=args.batch_size,
                          drop_last=False)

    support_loader = _loader("memory")
    train_loader = _loader("train")
    support_o_loader = _loader("memory_o")

    support_encodings, support_features, support_labels = \
        get_support_encodings_and_labels(
            model,
            support_loader,
            support_o_loader,
            train_loader)

    return get_exemplar_means(
        args=args,
        support_reps=support_encodings,
        support_labels=support_labels,
        labels_list=labels_list,
        model=model)


def get_exemplar_means(
        args,
        support_reps,
        support_labels,
        labels_list,
        model):
    """Compute class prototypes as the mean support encoding per label.

    Args:
        args: needs ``hidden_dim`` (int); ``device`` is no longer required
            here (normalization follows the tensors' device).
        support_reps: ``(num_tokens, hidden_dim)`` tensor of token encodings.
        support_labels: ``(num_tokens,)`` label ids aligned with the rows.
        labels_list: prototypes are computed for ids ``0..len(labels_list)-1``.
        model: must expose ``get_low_dim_feature(hidden)``.

    Returns:
        ``(hidden_tensors, feature_tensors)``: layer-normalized prototypes
        of shape ``(num_labels, hidden_dim)`` and their low-dimensional
        projections.
    """
    hidden_tensors = []
    with torch.no_grad():
        for i in range(len(labels_list)):
            idx = torch.where(support_labels == i)[0]
            # NOTE(review): a label with no support tokens yields a NaN mean;
            # assumed not to happen upstream — confirm.
            hidden_proto = torch.mean(support_reps[idx], dim=0)
            hidden_tensors.append(hidden_proto)
        hidden_tensors = torch.stack(hidden_tensors, dim=0)
        # Functional layer norm without affine params is numerically identical
        # to the original freshly-constructed nn.LayerNorm (weight initialized
        # to 1, bias to 0, same eps), but avoids allocating a module and
        # moving it to a device on every call.
        hidden_tensors = F.layer_norm(hidden_tensors, [args.hidden_dim])
        feature_tensors = model.get_low_dim_feature(hidden_tensors)
    return hidden_tensors, feature_tensors


def get_support_encodings_and_labels(enc,
                                     support_loader,
                                     support_o_loader,
                                     train_loader=None):
    """Collect token-level encodings, features and labels from memory data.

    First pass (``support_loader``): keeps only entity tokens — drops
    padding (label == ignore_index) and "O" tokens (label == 0).
    Second pass (``support_o_loader``): drops padding, then keeps only
    "O" tokens (label == 0).

    Args:
        enc: encoder; ``enc(input_ids=..., attention_mask=...)`` must return
            ``(encodings, features)``.
        support_loader: DataLoader over entity memory examples; each batch is
            ``(input_ids, attention_mask, segment_ids, label_ids)``.
        support_o_loader: DataLoader over "O"-token memory examples.
        train_loader: unused; kept (now with a default) for caller
            compatibility.

    Returns:
        Tuple of concatenated ``(encodings, features, labels)`` tensors.

    NOTE(review): reads the module-level ``args`` global for the target
    device — confirm this is intentional rather than a missing parameter.
    """
    # Hoisted out of both loops: ignore_index is the constant -100; no need
    # to construct a CrossEntropyLoss module per batch.
    ignore_index = CrossEntropyLoss().ignore_index
    support_encodings, support_labels = [], []
    support_features = []

    support_iterator = tqdm(support_loader, desc="Memory data representations", position=0)
    for index, batch in enumerate(support_iterator):
        for i in range(len(batch)):
            batch[i] = batch[i].to(args.device)
        with torch.no_grad():
            encodings, features = enc(
                input_ids=batch[0],
                attention_mask=batch[1])
            labels = batch[3]
            encodings = encodings.view(-1, encodings.shape[-1])
            features = features.view(-1, features.shape[-1])
            labels = labels.flatten()
            # Keep tokens that are neither padding (ignore_index) nor "O" (0):
            # the product is zero iff the label equals either value.
            idx = torch.where(labels * (labels - ignore_index) != 0)[0]
            support_encodings.append(encodings[idx])
            support_features.append(features[idx])
            support_labels.append(labels[idx])
        torch.cuda.empty_cache()

    support_o_iterator = tqdm(support_o_loader, desc="Memory_o data representations", position=0)
    for index, batch in enumerate(support_o_iterator):
        for i in range(len(batch)):
            batch[i] = batch[i].to(args.device)
        with torch.no_grad():
            encodings, features = enc(
                input_ids=batch[0],
                attention_mask=batch[1])
            labels = batch[3]
            encodings = encodings.view(-1, encodings.shape[-1])
            features = features.view(-1, features.shape[-1])
            labels = labels.flatten()
            # Drop padding tokens first ...
            idx = torch.where(labels - ignore_index != 0)[0]
            encodings = encodings[idx]
            features = features[idx]
            labels = labels[idx]
            # ... then keep only the "O" tokens (label == 0).
            support_encodings.append(encodings[labels == 0])
            support_features.append(features[labels == 0])
            support_labels.append(labels[labels == 0])
        torch.cuda.empty_cache()
    return torch.cat(support_encodings), torch.cat(support_features), torch.cat(support_labels)

if __name__ == "__main__":
    logger = logging.getLogger(__name__)

    # NOTE: ``args`` is the ArgumentParser until parse_args() rebinds the
    # same name to the parsed Namespace below.
    args = argparse.ArgumentParser()
    args.add_argument("--seed", type=int, default=20130824)

    # NOTE(review): the boolean-style options in this file declare
    # ``default=`` without ``action="store_true"`` or a type converter, so
    # any value supplied on the command line arrives as a non-empty string
    # and is truthy (e.g. ``--debug False`` still enables debug). The
    # defaults behave as written; only CLI overrides are affected.
    args.add_argument("--debug", default=False)

    args.add_argument("--dataset", type=str, default="conll")
    args.add_argument("--conll_train_path", type=str, default="./conll03/tasks/train_fg_1_pg_1.pth")


    args.add_argument("--server", type=str, default="107")
    args.add_argument("--device", type=str, default="cuda:0")
    args.add_argument("--model_name_or_path", type=str, default="bert-base-uncased")
    args.add_argument("--start_task", type=int, default=1)
    args.add_argument("--total_tasks", type=int, default=11)
    args.add_argument("--per_types", type=int, default=6)
    args.add_argument("--data_dir", type=str, default="./data/tasks")
    args.add_argument("--output_dir", type=str, default="./results")
    args.add_argument("--hidden_dim", type=int, default=768)
    args.add_argument("--feature_dim", type=int, default=64)
    args.add_argument("--batch_size", type=int, default=8)
    args.add_argument("--encoder_lr", type=float, default=1e-5)
    args.add_argument("--classifier_lr", type=float, default=1e-3)
    args.add_argument("--adam_eps", type=float, default=1e-6)
    args.add_argument("--current_epoch", type=int, default=15)
    args.add_argument("--cl_temp", default=0.1, type=float, help="对比学习的温度系数")
    args.add_argument("--kd_temp", default=0.5, type=float, help="对比学习的温度系数")
    args.add_argument("--margin", default=0.15, type=float, help="三元组损失的Ω")
    args.add_argument("--alpha", default=0.8, type=float, help="线性输出的权重系数")
    args.add_argument("--beta", default=0.2, type=float, help="更新prototype时候新的比例")
    args.add_argument("--gamma", default=2.0, type=float, help="focal的幂")
    args.add_argument("--cl_lambda", default=0.8, type=float, help="对比损失中三元组的权重")
    args.add_argument("--kd_lambda1", default=0.7, type=float, help="线性损失中蒸馏的权重")
    args.add_argument("--kd_lambda2", default=2.0, type=float, help="对比损失中蒸馏的权重")
    args.add_argument("--max_seq_length", type=int, default=128)
    args.add_argument("--report_file", type=str, default="record.txt")
    args.add_argument("--best_file", type=str, default="best_f1.txt")
    args.add_argument("--kd_percent", default=2.0, type=float, help="线性损失中蒸馏的权重")

    # Ablation switches (see the NOTE(review) on boolean parsing above).
    args.add_argument("--use_con_current", default=True)
    args.add_argument("--ignore_pad_con", default=False)
    args.add_argument("--relabel", default=False)
    args.add_argument("--use_kd", default=True)
    args.add_argument("--use_con_article", default=True)
    args.add_argument("--con_start_epoch", type=int, default=0)


    args.add_argument("--use_scloss", default=True)
    args.add_argument("--use_con_distill", default=True)
    args.add_argument("--use_con_distill_entropy", default=False)
    args.add_argument("--early_stop", type=int, default=6)

    args.add_argument("--usept2", default=False)

    args.add_argument("--no_start_with_bert_base", default=True)
    args.add_argument("--using_memory", default=False)

    args.add_argument("--just_test", default=False)
    args.add_argument("--test_task", type=int, default=1)


    args.add_argument("--adjust_kd_lambda2", default=False)
    args.add_argument("--adjust_kd_percent", default=False)




    # From here on, ``args`` is the parsed Namespace.
    args = args.parse_args()

    if args.seed: # fix the random seeds for reproducibility
        print("setting")
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        # NOTE(review): seeds only the current CUDA device; full determinism
        # would also need manual_seed_all / cudnn flags — confirm if needed.
        torch.cuda.manual_seed(args.seed)




    # Without the supervised-contrastive loss, prototypes are unused, so the
    # linear head gets the full weight.
    if args.use_scloss == False:
        args.alpha = 1.0


    # CoNLL-03 overrides: 4 tasks of 1 new type each.
    if args.dataset == "conll":
        args.data_dir = "./conll03/tasks"
        args.total_tasks = 4
        args.per_types = 1
        args.output_dir = "./conll03_results"

    print("using memory: ", args.using_memory)


    print("using pytorch2.0: ", args.usept2)
    if args.start_task == 0:
        args.no_start_with_bert_base = False


    # tuner_params = nni.get_next_parameter()
    # args = merge_parameter(args, tuner_params)
    # Hard-coded per-machine paths to the local pretrained BERT checkpoints.
    if args.server == "107":
        if args.dataset == "conll":
            args.base_model_path = "/home/livosr/bert-base-cased"
        else:
            args.base_model_path = "/home/livosr/bert-base-uncased"
    elif args.server == "108":
        if args.dataset == "conll":
            args.base_model_path = "/data/livosr/pretrained_model/bert-base-cased"
        else:
            args.base_model_path = "/data/livosr/pretrained_model/bert-base-uncased"
    else:
        raise Exception("Server must be 107 or 108.")

    tokenizer = BertTokenizer.from_pretrained(args.base_model_path)

    if not args.just_test:
        # Create a timestamped run directory and dump all hyperparameters.
        now = datetime.now()
        formatted_time = now.strftime('%Y-%m-%d %H:%M:%S')
        args.output_dir = args.output_dir + "/{}".format(formatted_time)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        args.report_file = os.path.join(args.output_dir, args.report_file)
        args.best_file = os.path.join(args.output_dir, args.best_file)
        args_file = os.path.join(args.output_dir, "hyper.txt")
        current_seed = torch.initial_seed()
        print(current_seed)
        with open(args_file, 'w') as f:
            for arg in vars(args):
                f.write("{}: {}\n".format(arg, getattr(args, arg)))
            f.write("seed: {}\n".format(current_seed))
    else:
        # Evaluation-only mode points at a previously trained run.
        args.output_dir = "/data/livosr/pythonproject/cl_ner/results/2023-10-11 09:45:36"





    current_encoder = None
    current_classifier = None
    task_prototypes = None
    task_proto_features = None
    prototypes = None
    proto_features = None
    prev_encoder = None

    if args.just_test:
        labels_list = get_labels_dy(args.data_dir, args.per_types, args.test_task)
        args.data_dir = args.data_dir + "/task_{}".format(args.test_task)

        test(args, args.test_task, args.data_dir, labels_list)
        exit(0)

    # NOTE(review): these two assignments override any --kd_percent /
    # --kd_lambda2 passed on the command line; with kd_percent starting at
    # 2.0 and the bound below also 2.0, the sweep runs exactly one pass.
    args.kd_percent = 2.0
    args.kd_lambda2 = 0.0
    while args.kd_percent <= 2.0:

        # Train each task sequentially; every task resumes from the
        # checkpoint of the previous one (task 0 starts from base BERT).
        for step_id in range(args.start_task, args.total_tasks):
            labels_list = get_labels_dy(args.data_dir, args.per_types, step_id)
            pad_label_id = CrossEntropyLoss().ignore_index

            if step_id == 0:
                model_path = args.base_model_path
            else:
                model_path = args.output_dir + "/task_{}".format(step_id - 1)
            output_dir = args.output_dir + "/task_{}".format(step_id)
            data_dir = args.data_dir + "/task_{}".format(step_id)
            if step_id > 0:
                prev_data_dir = args.data_dir + "/task_{}".format(step_id - 1)
            else:
                prev_data_dir = args.data_dir + "/task_{}".format(step_id)

            with open(args.best_file, "a") as f:
                f.write("use_con_current: {} ignore_pad_con: {}\n"
                        "relabel: {} use_kd: {} \n"
                        "use_con_article: {} use_scloss: {} \n"
                        "use_con_distill: {} use_con_distill_entropy: {} \n"
                        "kd_lambda2: {} kd_percent: {:.4f} \n"
                        "alpha: {} \n"
                        "\n".format(args.use_con_current, args.ignore_pad_con, args.relabel, args.use_kd,
                                    args.use_con_article, args.use_scloss,
                                    args.use_con_distill, args.use_con_distill_entropy,
                                    args.kd_lambda2, args.kd_percent, args.alpha))

            train(
                model_path=model_path,
                labels_list=labels_list,
                pad_token_id=pad_label_id,
                output_dir=output_dir,
                data_dir=data_dir,
                prev_data_dir=prev_data_dir,
                step_id=step_id,
                logger=logger,
            )

        args.kd_percent += 0.2





