from collections import defaultdict
import os, sys

# Run relative to this file's directory so "实验数据.txt" / "vocab.txt" resolve
# no matter where the script is launched from.  abspath() guards against
# os.path.dirname(__file__) == "" when the script is invoked by a bare
# relative name (os.chdir("") raises FileNotFoundError).
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Record separator between instances in the raw data file (sic: "separator";
# the name is kept because other code in this module references it).
seprator = "======================================================="
info = defaultdict(int)  # running stats, e.g. info["max_words"]
instances = []           # parsed instances, filled by make_vocab()
# Special tokens kept at the front of the vocabulary, in BERT order.
pre_words = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
words = set()            # real tokens observed in the data
final_words = []         # pre_words + sorted(words); written to vocab.txt
labels = set()           # label names of the chosen pretraining objective
original_data_path = "实验数据.txt"  # default raw-data file ("experiment data")


def make_vocab(file=original_data_path):
    """Parse the raw annotated data, build the vocabulary, and produce
    augmented pretraining instances.

    Side effects: mutates module-level ``instances``, ``words``,
    ``final_words``, ``labels`` and ``info`` (so calling twice accumulates),
    and writes the vocabulary to ``vocab.txt`` next to this file.

    :param file: path to the raw data file; records are separated by
        ``seprator`` lines, each record being relation lines of the form
        ``index1#word1#rel#word2#index2`` followed by one formula line.
    :return: dict with keys ``original_instances``, ``instances`` (augmented),
        ``final_words``, ``labels2id`` and ``max_words``.
    """
    with open(file=file, mode='r', encoding="UTF-8") as f:
        lines = f.read()
        raw_instances = lines.split(seprator)
        for raw_instance in raw_instances:
            raw_instance_str = raw_instance.strip()

            if raw_instance_str:  # the chunk after the last separator may be an empty trailing newline
                single_instance_dict = dict()
                instance_pre = raw_instance_str.split("\n")
                formula_split_dict = dict()
                relationships = list()
                # Formula: last line of the record; first 5 chars are the key
                # label, the remainder is the formula text.
                single_instance_dict[instance_pre[-1][:5]] = instance_pre[-1][5:]
                # Token split and relations from all preceding lines.
                for instance_line in instance_pre[:-1]:
                    index1, word1, rel, word2, index2 = instance_line.split("#")
                    index1 = int(index1)  # 1-based: position 0 is reserved for the later [CLS] token
                    index2 = int(index2)
                    rel = int(rel)
                    formula_split_dict[index1] = word1
                    formula_split_dict[index2] = word2
                    words.update({word1, word2})
                    relationships.append((index1, index2, rel))
                len_split = len(formula_split_dict)
                info["max_words"] = max(info["max_words"], len_split)
                single_instance_dict["split_sequence_dict"] = formula_split_dict
                single_instance_dict["relationships"] = relationships
                single_instance_dict["len_split"] = len_split
                single_instance_dict["公式:"] = tuple(formula_split_dict[i] for i in range(1, len_split + 1))
                single_instance_dict["标签:"] = [0] * len_split
                # BUG FIX: the append must live INSIDE the guard.  It used to
                # run for empty chunks too, appending a stale duplicate of the
                # previous instance (or raising NameError on a leading empty
                # chunk).
                instances.append(single_instance_dict)
        final_words.extend(pre_words + sorted(words))
        # Objective A: predict the original character (dead assignment kept
        # for reference; objective B below is the one currently in effect).
        labels2id = {label: ix for ix, label in enumerate(final_words)}
        # Objective B: binary replaced-or-not detection — overrides A.
        labels2id = {"original": 1, "replaced": 0}
        labels.update(labels2id.keys())
        # Explicit UTF-8 so Chinese tokens survive on any platform locale.
        with open("vocab.txt", 'w', encoding="UTF-8") as vocab_f:
            vocab_f.write("\n".join(final_words))
    final_instances = []
    for single_instance_dict in instances:
        augument_labeled_data = augument_labelled_instance(instance=single_instance_dict, real_words=words)
        final_instances.extend(augument_labeled_data)
    return {"original_instances": instances,
            "instances": final_instances,
            "final_words": final_words,
            "labels2id": labels2id,
            "max_words": info["max_words"],
            }


def augument_labelled_instance(instance, real_words):
    """Generate multiple pretraining instances from one labelled instance.

    For roughly half of the token positions, substitute a sample of candidate
    words and label each position 1 if the word was actually replaced, else 0.

    :param instance: parsed instance dict with at least ``len_split`` and
        ``split_sequence_dict`` (1-based index -> token).
    :param real_words: collection of real vocabulary words to draw
        replacements from.
    :return: list of new instance dicts with fresh ``公式:`` / ``标签:`` pairs;
        empty when ``len_split`` < 2 or ``real_words`` has fewer than 3 words.
    """
    import random
    len_split = instance["len_split"]
    splits = tuple(instance["split_sequence_dict"][_index] for _index in range(1, len_split + 1))
    pairs_ = set()  # dedup (sequence, labels) pairs
    indices2label = tuple(0 for _ in range(len_split))
    multiplier = len_split // 2
    # BUG FIX: random.sample() rejects sets since Python 3.11; a sorted list
    # also makes seeded runs reproducible despite str hash randomization.
    candidate_pool = sorted(real_words)
    for replace_index in random.sample(range(len_split), multiplier):
        candidate_words_num = len(candidate_pool) // 3
        sequence = list(splits)
        goal_indices2label = list(indices2label)
        for replace_word in random.sample(candidate_pool, candidate_words_num):
            sequence[replace_index] = replace_word
            # Label 1 only when the drawn word actually differs from the original.
            goal_indices2label[replace_index] = int(replace_word != splits[replace_index])
            pairs_.add((tuple(sequence), tuple(goal_indices2label)))
    augument_labeled_instances = []
    for pair in pairs_:
        new_instance = dict(instance)  # shallow copy; shared sub-dicts are not mutated
        new_instance["公式:"] = pair[0]
        new_instance["标签:"] = pair[1]
        augument_labeled_instances.append(new_instance)
    return augument_labeled_instances

if __name__ == "__main__":
    # Build vocab + augmented instances and report how many were produced.
    result = make_vocab()
    print(len(result["instances"]))