import os

import torch
from tqdm import tqdm

def add(entities, dict_static, types, words, labels, bi_labels):
    """Record one sentence's entity mentions into the running statistics.

    Args:
        entities: entities found in the sentence; each entity is a list of
            its tokens.
        dict_static: mapping ``label type -> {entity string -> record}``,
            where a record holds ``cnt`` (mention count) plus the parallel
            ``sentences`` / ``labels`` / ``bi_labels`` example lists.
        types: label type of each entry in ``entities`` (parallel list).
        words: tokens of the sentence (shared by every entity it contains).
        labels: per-token label types of the sentence.
        bi_labels: per-token BIO labels of the sentence.
    """
    for entity, ent_type in zip(entities, types):
        ent_str = ' '.join(entity)
        bucket = dict_static[ent_type]
        record = bucket.get(ent_str)
        if record is None:
            bucket[ent_str] = {
                "cnt": 1,
                "sentences": [words],
                "labels": [labels],
                "bi_labels": [bi_labels],
            }
        else:
            record["cnt"] += 1
            record["sentences"].append(words)
            record["labels"].append(labels)
            record["bi_labels"].append(bi_labels)
            # Keep this type's entities ordered by descending frequency so
            # the most common ones can be read off the front later.
            dict_static[ent_type] = dict(
                sorted(bucket.items(), key=lambda item: item[1]['cnt'], reverse=True)
            )




def data_division(mode, now_label, task_num, name, per_types=2):
    """Read the CoNLL-style file ``name + ".txt"``, count entity mentions per
    label type, and write one example sentence for each of the 5 most frequent
    entities of every type to ``memory.txt``.

    Args:
        mode: unused here; kept for interface compatibility with callers.
        now_label: entity label type names. NOTE(review): ``now_label[1:]`` is
            used to build ``now_bi_list`` as if ``now_label[0]`` were "O", but
            the caller passes a list with no "O" head — confirm intended
            (``now_bi_list`` is currently unused anyway).
        task_num: unused here; kept for interface compatibility.
        name: basename of the input file (``name + ".txt"`` is read).
        per_types: number of label types visible per task; labels outside the
            visible slice are masked to "O" in the printed example.
    """
    now_bi_list = ["O"]
    for label in now_label[1:]:
        now_bi_list.extend(["B-" + label, "I-" + label])
    file_path = name + ".txt"

    guid_index = 1
    # Task index for the label mask below; it is never advanced here, so only
    # now_label[:per_types] stays visible — TODO confirm intended.
    i = 0

    # label type -> {entity string -> {"cnt", "sentences", "labels", "bi_labels"}}
    dict_static = {lab: {} for lab in now_label}

    with open(file_path, encoding="utf-8") as f:
        words, labels, bi_labels = [], [], []
        entities, types = [], []
        entity = []   # tokens of the entity currently being read
        pre = "O"     # label type of the previous token ("O" = no open entity)
        for line in tqdm(f, desc="Loading Dataset"):
            if line.startswith("-DOCSTART-") or not line.strip():
                if words:
                    # Close an entity that runs to the end of the sentence
                    # (the original dropped it, appending only on "O" tokens).
                    if pre != "O":
                        entities.append(entity)
                        types.append(pre)
                    add(entities, dict_static, types, words, labels, bi_labels)
                    guid_index += 1
                    words, labels, bi_labels = [], [], []
                    entities, types = [], []
                    entity = []
                    pre = "O"  # reset so state does not leak across sentences
                continue
            splits = line.strip().split(" ")
            if not splits[0].strip():
                continue
            words.append(splits[0])
            tag = splits[-1].replace("\n", "")
            if len(tag) > 1:
                lab = tag.split("-")[-1]
                bi = tag.split("-")[0]
                if bi == "B":
                    # A new entity starts; close any entity still open so
                    # adjacent "B-X B-Y" does not lose the first one.
                    if pre != "O":
                        entities.append(entity)
                        types.append(pre)
                    entity = [splits[0]]
                elif bi == "I":
                    entity.append(splits[0])
                # Always record the token's labels so words/labels stay
                # aligned even for unexpected tag shapes.
                labels.append(lab)
                bi_labels.append(tag)
                pre = lab
            else:
                if pre != "O":
                    entities.append(entity)
                    types.append(pre)
                    entity = []
                labels.append("O")
                bi_labels.append("O")
                pre = "O"
        # Flush the last sentence when the file does not end with a blank line.
        if words:
            if pre != "O":
                entities.append(entity)
                types.append(pre)
            add(entities, dict_static, types, words, labels, bi_labels)

    with open("memory.txt", "w", encoding="utf-8") as out:
        for lab_type in now_label:
            ents = dict_static[lab_type]
            # dict_static keeps each type sorted by cnt, so the first five
            # keys are the most frequent entities of this type.
            for ent in list(ents.keys())[:5]:
                cnt = ents[ent]["cnt"]
                print(lab_type, ent, cnt)
                # Pick a random example mention (the original hard-coded
                # index [1], which raises IndexError whenever cnt == 1).
                random_idx = torch.randint(0, cnt, (1,)).item()
                sentence = ents[ent]["sentences"][random_idx]
                label = ents[ent]["labels"][random_idx]
                bi_label = ents[ent]["bi_labels"][random_idx]
                # Mask labels outside the currently visible task slice.
                label = [lab if lab in now_label[: (i + 1) * per_types] else "O"
                         for lab in label]

                print(sentence)
                print(label)
                print(bi_label)

                print("\n")

                for sen, bi_lab in zip(sentence, bi_label):
                    out.write(sen + " " + bi_lab + "\n")
                out.write("\n")











# OntoNotes 5.0 entity label set.
label_lists = ['CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY', 'NORP',
               'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT', 'QUANTITY', 'TIME', 'WORK_OF_ART']
# All 18 types in one task, so no labels are masked to "O".
per_types = 18

if __name__ == "__main__":
    # Guarded so importing this module does not trigger file I/O.
    data_division(
        mode="train",
        now_label=label_lists,
        task_num=0,
        name="train",
        per_types=per_types)