import torch
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader
from tqdm import tqdm

from evaluate import evaluate

def check_labels_batch(
        preds,
        ground_truth,
):
    """Merge non-conflicting predicted entity spans into batch labels.

    For each sequence, entity spans (maximal runs of one non-zero label id)
    are extracted from both the ground-truth row and the prediction row,
    ignoring padding positions (``CrossEntropyLoss().ignore_index``).
    Predicted spans that do not touch any ground-truth span are copied into
    the label row.  Rows of ``ground_truth`` are updated in place, and the
    stacked result is also returned.

    Args:
        preds: (batch, seq_len) tensor of predicted label ids.
        ground_truth: (batch, seq_len) tensor of label ids, padded with
            ``CrossEntropyLoss().ignore_index``.

    Returns:
        (batch, seq_len) tensor of relabeled sequences.
    """
    ignore_id = CrossEntropyLoss().ignore_index

    def _entity_spans(seq):
        # Maximal runs of a single non-zero label, as (start, end), end exclusive.
        spans = []
        prev, start = 0, 0
        for j in range(len(seq)):
            cur = int(seq[j])
            if cur != 0:
                if prev == 0:
                    start = j
                elif prev != cur:
                    spans.append((start, j))
                    start = j  # BUG FIX: a new span begins at the type change
            elif prev != 0:
                spans.append((start, j))
            prev = cur
        if prev != 0:
            spans.append((start, len(seq)))  # BUG FIX: close a span running to the end
        return spans

    raw_relabels = []
    for i in range(len(preds)):
        current_label = ground_truth[i]
        re_label = preds[i]
        # Keep only non-padded positions for span extraction.
        idx = torch.where(current_label != ignore_id)[0]
        current_label_no_pad = current_label[idx]
        re_label_no_pad = re_label[idx]
        cur_spans = _entity_spans(current_label_no_pad)
        re_spans = _entity_spans(re_label_no_pad)
        if re_spans:
            for span in re_spans:
                # Original conservative test preserved: predicted spans that
                # merely touch a ground-truth span count as conflicting.
                if all(span[0] > c[1] or span[1] < c[0] for c in cur_spans):
                    current_label_no_pad[span[0]:span[1]] = re_label_no_pad[span[0]:span[1]]
            current_label[idx] = current_label_no_pad
        raw_relabels.append(current_label)

    return torch.stack(raw_relabels, dim=0)

def check_labels(
        preds,
        train_dataset,
):
    """Merge non-conflicting predicted entity spans into dataset labels.

    Same span-merge logic as ``check_labels_batch`` but the reference
    labels are read from ``train_dataset[i][3]`` and ``preds[i]`` may be a
    plain list (it is converted to a tensor on the label's device).  The
    label tensors held by the dataset are modified in place; the stacked
    result is also returned.

    Args:
        preds: sequence of per-example predicted label-id sequences.
        train_dataset: dataset whose ``__getitem__(i)[3]`` is the label
            tensor for example ``i``, padded with
            ``CrossEntropyLoss().ignore_index``.

    Returns:
        (num_examples, seq_len) tensor of relabeled sequences.
    """
    ignore_id = CrossEntropyLoss().ignore_index

    def _entity_spans(seq):
        # Maximal runs of a single non-zero label, as (start, end), end exclusive.
        spans = []
        prev, start = 0, 0
        for j in range(len(seq)):
            cur = int(seq[j])
            if cur != 0:
                if prev == 0:
                    start = j
                elif prev != cur:
                    spans.append((start, j))
                    start = j  # BUG FIX: a new span begins at the type change
            elif prev != 0:
                spans.append((start, j))
            prev = cur
        if prev != 0:
            spans.append((start, len(seq)))  # BUG FIX: close a span running to the end
        return spans

    raw_relabels = []
    for i in range(len(preds)):
        current_label = train_dataset[i][3]
        re_label = torch.tensor(preds[i]).to(current_label.device)
        # Keep only non-padded positions for span extraction.
        idx = torch.where(current_label != ignore_id)[0]
        current_label_no_pad = current_label[idx]
        re_label_no_pad = re_label[idx]
        cur_spans = _entity_spans(current_label_no_pad)
        re_spans = _entity_spans(re_label_no_pad)
        if re_spans:
            for span in re_spans:
                # Original conservative test preserved: predicted spans that
                # merely touch a ground-truth span count as conflicting.
                if all(span[0] > c[1] or span[1] < c[0] for c in cur_spans):
                    current_label_no_pad[span[0]:span[1]] = re_label_no_pad[span[0]:span[1]]
            current_label[idx] = current_label_no_pad
        raw_relabels.append(current_label)

    return torch.stack(raw_relabels, dim=0)

def get_relabel_proto_test(args, prev_encoder, prev_classifier, train_dataset, step_id, labels_list):
    """Test-time variant of ``get_relabel_proto``: only rebuilds prototypes.

    No re-labeling pass is run; class prototypes are computed directly from
    ``train_dataset`` with the frozen ``prev_encoder``.  ``prev_classifier``
    is accepted for interface symmetry with ``get_relabel_proto`` but is
    unused here.

    Returns:
        (proto_types, proto_features, ent_num) from
        ``get_prototypes_train_relabel``.
    """
    print(labels_list)
    # BUG FIX: get_prototypes_train_relabel takes a required positional
    # task_id; the original call omitted it and would raise TypeError.
    # step_id is forwarded here — TODO confirm (task_id only matters when
    # args.just_test is false).
    proto_types, proto_features, ent_num = get_prototypes_train_relabel(
        args,
        prev_encoder,
        train_dataset,
        labels_list,
        step_id,
        data_dir=None,
    )
    return proto_types, proto_features, ent_num

def get_relabel_proto(args, prev_encoder, prev_classifier, train_dataset, step_id, labels_list):
    """Re-label the training set with the previous model, then build prototypes.

    Runs ``evaluate`` (in "relabel" mode, against the previous task id) to
    obtain raw predictions on ``train_dataset``, merges them into the labels
    via ``check_labels``, and finally computes class prototypes with the
    frozen ``prev_encoder``.

    Returns:
        (proto_types, proto_features, ent_num, raw_relabels)
    """
    eval_out = evaluate(
        args=args,
        model=prev_encoder,
        classifier=prev_classifier,
        valdata=train_dataset,
        proto_features=None,
        task_id=step_id - 1,
        labels_list=labels_list,
        tsne=False,
        cal_loss=False,
        mode="relabel",
    )
    # evaluate returns (macro_f1, micro_f1, preds_list, preds, out_label_ids,
    # results, loss_tup); only the raw predictions are needed here.
    preds = eval_out[3]

    raw_relabels = check_labels(preds, train_dataset)

    print("getting prototypes ...")
    proto_types, proto_features, ent_num = get_prototypes_train_relabel(
        args,
        prev_encoder,
        train_dataset,
        labels_list,
        step_id,
        data_dir=None,
    )
    return proto_types, proto_features, ent_num, raw_relabels

def get_o_prototypes_train_relabel(
        args,
        encodings,
        features,
        labels,
        model
):
    """Compute the prototype of the O (label-id 0) class only.

    Flattens batched encodings/features/labels to token level, drops padded
    tokens (``CrossEntropyLoss().ignore_index``), and delegates to
    ``get_exemplar_means`` with ``labels_list=[0]`` so only the O class is
    averaged.

    Returns:
        (other_types, other_features, ent_num) from ``get_exemplar_means``.
    """
    pad_id = CrossEntropyLoss().ignore_index
    flat_enc = encodings.view(-1, encodings.shape[-1])
    flat_feat = features.view(-1, features.shape[-1])
    flat_labels = labels.flatten()
    # Drop padded tokens before averaging.
    keep = torch.where(flat_labels - pad_id != 0)[0]
    flat_enc = flat_enc[keep]
    flat_feat = flat_feat[keep]
    flat_labels = flat_labels[keep]
    torch.cuda.empty_cache()

    return get_exemplar_means(
        args=args,
        support_reps=flat_enc,
        support_features=flat_feat,
        support_labels=flat_labels,
        labels_list=[0],
        model=model)


def get_prototypes_train_relabel(
        args,
        model,
        train_set,
        labels_list,
        task_id,
        data_dir=None,
):
    """Compute class prototypes over ``train_set`` with a frozen encoder.

    Encodes the whole training set (batch size 128, in order), then averages
    token representations per class via ``get_exemplar_means``.  When not in
    ``args.just_test`` mode, the label list is truncated to exclude the
    ``args.per_types[task_id]`` classes introduced by the current task, so
    only previously-seen classes get prototypes.

    Returns:
        (proto_types, proto_features, ent_num).  Unless
        ``args.just_test and args.test_use_other_proto``, the O-class
        (index 0) prototype is stripped from the first two tensors.
    """
    if data_dir is None:
        data_dir = args.data_dir  # NOTE(review): data_dir is never used below — confirm intent

    model.eval()

    train_loader = DataLoader(train_set,
                              batch_size=128,
                              shuffle=False,
                              drop_last=False)

    train_encodings, train_features, train_labels = \
        get_train_encodings_and_labels(
            args=args,
            enc=model,
            train_loader=train_loader)

    if not args.just_test:
        # Keep only labels seen before the current task.
        labels_list = labels_list[:len(labels_list) - args.per_types[task_id]]

    proto_types, proto_features, ent_num = get_exemplar_means(
        args=args,
        support_reps=train_encodings,
        support_features=train_features,
        support_labels=train_labels,
        labels_list=labels_list,
        model=model)
    if args.just_test and args.test_use_other_proto:
        return proto_types, proto_features, ent_num

    # Drop the O-class prototype (index 0) for normal training use.
    return proto_types[1:], proto_features[1:], ent_num


def get_exemplar_means(
        args,
        support_reps,
        support_features,
        support_labels,
        labels_list,
        model):
    """Average token representations per class index to form prototypes.

    For each class index ``i`` in ``range(len(labels_list))`` (note: the
    index, not ``labels_list[i]``, is matched against ``support_labels``),
    the mean of the matching rows of ``support_reps`` / ``support_features``
    is taken; classes with no support tokens get zero vectors.  The stacked
    hidden prototypes are passed through a freshly-initialized LayerNorm
    (identity affine: weight=1, bias=0).  ``model`` is accepted but unused.

    Returns:
        (hidden_tensors, feature_tensors, ent_num) where ent_num maps class
        index -> number of support tokens found.
    """
    proto_hidden, proto_feature = [], []
    token_counts = {}
    with torch.no_grad():
        for class_idx in range(len(labels_list)):
            match = torch.where(support_labels == class_idx)[0]
            token_counts[class_idx] = len(match)
            if len(match):
                proto_hidden.append(torch.mean(support_reps[match], dim=0))
                proto_feature.append(torch.mean(support_features[match], dim=0))
            else:
                # No support tokens for this class: fall back to zeros.
                proto_hidden.append(torch.zeros_like(support_reps[0]))
                proto_feature.append(torch.zeros_like(support_features[0]))
        stacked_hidden = torch.stack(proto_hidden, dim=0)
        stacked_hidden = torch.nn.LayerNorm([args.hidden_dim]).to(args.device)(stacked_hidden)
        stacked_feature = torch.stack(proto_feature, dim=0)
    return stacked_hidden, stacked_feature, token_counts


def get_train_encodings_and_labels(
        args,
        enc,
        train_loader):
    """Encode every batch in ``train_loader`` and collect token-level outputs.

    Each batch is moved to ``args.device`` and passed through ``enc`` as
    ``enc(input_ids=batch[0], attention_mask=batch[1])``, which must return
    ``(encodings, features)``; ``batch[3]`` holds the labels.  Outputs are
    flattened to token level and padded tokens
    (``CrossEntropyLoss().ignore_index``) are dropped.

    Returns:
        (encodings, features, labels) — three tensors concatenated over all
        kept tokens.
    """
    ignore_id = CrossEntropyLoss().ignore_index
    all_encodings, all_features, all_labels = [], [], []
    progress = tqdm(train_loader, desc="Getting training prototypes", position=0)
    for batch in progress:
        batch = [item.to(args.device) for item in batch]
        with torch.no_grad():
            hidden, low_dim = enc(
                input_ids=batch[0],
                attention_mask=batch[1])
            flat_hidden = hidden.view(-1, hidden.shape[-1])
            flat_low = low_dim.view(-1, low_dim.shape[-1])
            flat_labels = batch[3].flatten()
            # Drop padded positions before accumulating.
            keep = torch.where((flat_labels - ignore_id) != 0)[0]
            all_encodings.append(flat_hidden[keep])
            all_features.append(flat_low[keep])
            all_labels.append(flat_labels[keep])
        torch.cuda.empty_cache()

    return torch.cat(all_encodings), torch.cat(all_features), torch.cat(all_labels)


