import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import load_metric
from torch.nn import KLDivLoss, CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm

from fiscore import get_f1
from metric import compute_metrics
# from tsne import tsne_visualization


def evaluate(
        args,
        model,
        classifier,
        valdata,
        task_id,
        labels_list,
        proto_features=None,
        tsne=False,
        cal_loss=True,
        relabel=False,
        mode="dev"
):
    """Evaluate ``model`` + ``classifier`` on ``valdata`` and report seqeval metrics.

    Per-token class probabilities come from the classifier softmax and are
    optionally blended with prototype (nearest-class-mean style) scores when
    ``proto_features`` is given, weighted by ``args.alpha``.  The argmax class
    ids are then converted to BIO tag strings and scored with ./Seqeval.py.

    Args:
        args: experiment config; must provide ``dev_batch_size``, ``device``,
            ``cl_temp``, ``alpha``, ``test_use_other_proto``, ``output_dir``
            and ``tokenizer``.
        model: encoder whose ``forward(input_ids, attention_mask)`` returns
            ``(hidden, feature)``.
        classifier: head whose ``forward(hidden, labels)`` returns
            ``(loss, logits)``.
        valdata: torch Dataset; item element 0 is the input-id tensor and
            element 3 the label-id tensor (element 4 is read in "test" mode —
            presumably gold BIO tag ids; verify against the dataset class).
        task_id: current task index (only used by the commented-out t-SNE call).
        labels_list: class names indexed by label id; id 0 is treated as "O".
        proto_features: optional (num_classes, dim) prototype matrix.
        tsne: if True, collect token representations (the visualization call
            itself is commented out below).
        cal_loss: if True, also request the classifier's loss.
        relabel: unused here; the "relabel" behavior is keyed off ``mode``.
        mode: "dev", "train", "test" or "relabel". "test" scores against the
            separate BIO labels from batch element 4; "train"/"test" also dump
            per-token results to a file.

    Returns:
        Tuple ``(macro_f1, micro_f1, preds_list, preds, out_label_ids,
        results, loss_tup)``.  In "relabel" mode the metric slots are
        zeros/empty.  NOTE(review): ``loss_tup`` is always the empty tuple —
        the ``mean_*`` accumulators below are never updated.
    """
    # NOTE(review): loss_func and LogSoftmax are created but never used here.
    loss_func = KLDivLoss(reduction="batchmean")
    LogSoftmax = nn.LogSoftmax(dim=-1)
    pad_label_id = CrossEntropyLoss().ignore_index  # -100: positions to skip
    pred_labels = []
    ground_truth = []
    bi_ground_truth = []
    model.eval()
    dataloader = DataLoader(valdata,
                            batch_size=args.dev_batch_size,
                            shuffle=False,
                            drop_last=False)

    # Token representations (and labels) accumulated only when tsne=True.
    reps = np.array([])
    reps_labels = []

    # NOTE(review): these accumulators are declared but never updated.
    mean_ce_loss = 0
    mean_kd_loss = 0
    mean_fkd_loss = 0
    mean_con_loss = 0
    word_ids_list = []

    # Cache every example's input ids for the per-token result dump below.
    for i in range(len(valdata)):
        word_ids_list.append(valdata.__getitem__(i)[0].detach().cpu().numpy())


    bar = tqdm(dataloader, desc="Evaluating")

    for i, batch in enumerate(bar):
        # NOTE(review): this inner loop shadows the enumerate index `i`
        # (harmless here since the outer `i` is not used after this point).
        for i in range(len(batch)):
            batch[i] = batch[i].to(args.device)

        with torch.no_grad():

            hidden, feature = model.forward(
                input_ids=batch[0],
                attention_mask=batch[1])
            if cal_loss:
                ce_loss, logits = classifier.forward(hidden, batch[3])
            else:
                logits = classifier.forward(hidden, None)[0]
            prob_cls = F.softmax(logits, dim=-1)

            if proto_features is not None:
                # Prototype scores: dot product against each class prototype,
                # temperature-scaled, then softmaxed per token.
                feature = feature.view(-1, feature.shape[-1])
                logits = torch.mm(feature, proto_features.T) / args.cl_temp
                prob_ncm = F.softmax(logits, dim=-1)
                prob_ncm = prob_ncm.view(prob_cls.shape[0], -1, prob_ncm.shape[-1])
                prob = torch.zeros_like(prob_cls)
                if args.test_use_other_proto:
                    # Blend classifier and prototype probabilities for all classes.
                    prob = prob_cls * args.alpha + prob_ncm * (1 - args.alpha)
                else:
                    # Keep the classifier's "O" probability; blend only entity classes.
                    prob[:, :, 0] = prob_cls[:, :, 0]
                    prob[:, :, 1:] = prob_cls[:, :, 1:] * args.alpha + prob_ncm * (1 - args.alpha)
            else:
                prob = prob_cls

            if tsne:
                # Flatten to tokens and drop padded positions (label -100).
                x = hidden.view(-1, hidden.shape[-1])
                y = batch[3].view(-1)

                x = x[y != -100]
                y = y[y != -100]

                x_np = x.detach().cpu().numpy()
                y_np = y.detach().cpu().numpy()

                if len(reps) == 0:
                    reps = x_np
                    reps_labels = y_np
                else:
                    reps = np.concatenate((reps, x_np), axis=0)
                    reps_labels = np.concatenate((reps_labels, y_np), axis=0)



        pred = torch.argmax(prob, dim=-1)
        pred_labels.append(pred)
        ground_truth.append(batch[3])
        if mode == "test":
            bi_ground_truth.append(batch[4])
        torch.cuda.empty_cache()

    loss_tup = ()

    # if tsne:
    #     tsne_visualization(reps, reps_labels, task_id, args)

    preds = torch.cat(pred_labels, dim=0)
    out_label_ids = torch.cat(ground_truth, dim=0)
    if mode == "test":
        bi_out_label_ids = torch.cat(bi_ground_truth, dim=0)

    preds = preds.detach().cpu().numpy()
    out_label_ids = out_label_ids.detach().cpu().numpy()
    if mode == "test":
        bi_out_label_ids = bi_out_label_ids.detach().cpu().numpy()

    # Map class ids to names; id 0 is always the outside tag "O".
    # label_map = {i: "I-" + label for i, label in enumerate(labels_list)}
    label_map = {i: label for i, label in enumerate(labels_list)}
    label_map[0] = "O"
    print(label_map)

    # BIO variant of the label map: every entity class expands to B-/I- tags.
    bi_labels_list = ["O"]
    for label in labels_list[1:]:
        bi_labels_list.append("B-" + label)
        bi_labels_list.append("I-" + label)
    bi_label_map = {i: label for i, label in enumerate(bi_labels_list)}

    print(bi_label_map)




    # One inner list per sentence.
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]

    preds_list = [[] for _ in range(out_label_ids.shape[0])]

    sens_list = [[] for _ in range(out_label_ids.shape[0])]

    # Build 2-D lists of gold and predicted tag strings with pad positions
    # removed.  Class-id sequences are converted to BIO on the fly: a change
    # of entity id starts a "B-" tag, a repeated id continues with "I-".
    for i in range(out_label_ids.shape[0]):
        prev_ol = 0
        prev_p = 0
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_label_id:
                sens_list[i].append(word_ids_list[i][j])
                if out_label_ids[i, j] != 0:
                    if mode != "test":
                        # Derive gold BIO tags from the class-id sequence.
                        if prev_ol != out_label_ids[i, j]:
                            out_label_list[i].append("B-" + label_map[out_label_ids[i][j]])
                        elif prev_ol != 0 and prev_ol == out_label_ids[i][j]:
                            out_label_list[i].append("I-" + label_map[out_label_ids[i][j]])
                        prev_ol = out_label_ids[i][j]
                    else:
                        # In test mode the gold BIO tags come directly from batch[4].
                        out_label_list[i].append(bi_label_map[bi_out_label_ids[i][j]])


                else:
                    if mode != "test":
                        out_label_list[i].append(label_map[out_label_ids[i][j]])
                        prev_ol = 0
                    else:
                        out_label_list[i].append(bi_label_map[bi_out_label_ids[i][j]])



            # Predicted tags use the same BIO derivation in every mode.
            if out_label_ids[i, j] != pad_label_id:
                if preds[i, j] != 0:
                    if prev_p != preds[i, j]:
                        preds_list[i].append("B-" + label_map[preds[i][j]])
                    elif prev_p != 0 and prev_p == preds[i][j]:
                        preds_list[i].append("I-" + label_map[preds[i][j]])
                    prev_p = preds[i][j]
                else:
                    preds_list[i].append(label_map[preds[i][j]])
                    prev_p = 0
        # if all(item == "O" for item in out_label_list[i]):
        #     continue
        # else:
        #     print("i = ", i)
        #     print(out_label_list[i])
        #     print(preds_list[i])
        #     print("\n")
    if mode == "relabel":
        # Relabeling pass: the caller only needs the raw predictions.
        return 0, 0, preds_list, preds, out_label_ids, [], loss_tup

    metric = load_metric("./Seqeval.py")

    # for i in range(10):
    #     for j in range(len(out_label_list[i])):
    #         print(out_label_list[i][j], preds_list[i][j])
    #     print("\n")

    # get_f1(labels_list, preds, out_label_ids)

    metric.add_batch(
        predictions=preds_list,
        references=out_label_list,
    )

    macro_f1, micro_f1, results = compute_metrics(metric)


    # Per-entity-type breakdown plus the overall scalar metrics.
    for key, values in results.items():
        if isinstance(values, dict):
            print("{}: pre= {:.4f}, rec = {:.4f}, f1 = {:.4f}, num = {}".format(key, values["precision"], values["recall"], values["f1"], values["number"]))
        else:
            print("{} = {:.4f}".format(key, values))

    print("macro: pre = {:.4f}, rec = {:.4f}, f1 = {:.4f}".
          format(macro_f1["precision"], macro_f1["recall"], macro_f1["f1"]))
    print("micro: pre = {:.4f}, rec = {:.4f}, f1 = {:.4f}".
          format(micro_f1["precision"], micro_f1["recall"], micro_f1["f1"]))
    print("=========================================")

    if mode == "train" or mode == "test":
        # Dump per-token predictions for every sentence that contains at
        # least one gold entity tag.
        with open(args.output_dir + "/results_{}.txt".format(mode), "w") as f:
            for i in range(len(out_label_list)):
                if not all(item == "O" for item in out_label_list[i]):
                    sens = args.tokenizer.convert_ids_to_tokens(sens_list[i])
                    sens_des = args.tokenizer.convert_ids_to_tokens(word_ids_list[i])
                    sens_des_no_pad = [token for token in sens_des if token != "[PAD]"]
                    # [1:-1] presumably strips [CLS]/[SEP] — confirm tokenizer setup.
                    sens_des_no_pad = sens_des_no_pad[1:-1]
                    sens_des_no_pad_merge = merge_bert_tokens(sens_des_no_pad)
                    # NOTE(review): indexing the merged-token list by the label
                    # position j assumes a 1:1 alignment; if any "##" pieces
                    # were merged this list is shorter and may IndexError —
                    # confirm the labeling scheme.
                    for j in range(len(out_label_list[i])):

                        f.write("{}\t{}\t{}\t{}\t{}\n".format(sens[j], sens_des_no_pad_merge[j], out_label_list[i][j], preds_list[i][j], sens_list[i][j]))
                    #
                    f.write("{}\n".format(sens_des_no_pad))
                    f.write("\n")

    return macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup


def merge_bert_tokens(tokens):
    """Collapse WordPiece sub-tokens into whole words.

    A token beginning with "##" is a continuation piece: its suffix (the part
    after "##") is appended to the most recently emitted word.  A continuation
    piece that arrives before any word exists is silently dropped.
    """
    words = []
    for piece in tokens:
        if not piece.startswith("##"):
            words.append(piece)
        elif words:
            # Continuation piece: glue its suffix onto the previous word.
            words[-1] += piece[2:]
    return words