import glob
import json
import os
from io import StringIO

import pandas as pd
from tqdm import tqdm

"""
Steps to convert the data into JSON format.

Step-0: Use a Python environment where pandas is installed.
Step-1: Download the source file from here:
        https://figshare.com/articles/dataset/MACCROBAT2018/9764942
Step-2: Unzip the file and put it into a folder (say a `data` folder).
        All unzipped files will be present here:
        * data/MACCROBAT2020/*.txt
        * data/MACCROBAT2020/*.ann
Step-3: Set the correct paths and run this file.
"""


def remove_overlapped_ner_tags(ner_details: list[dict]):
    """Remove overlapping entities.

    Args:
        ner_details (list[dict]): a list of dictionaries where each dictionary
            holds the information of one entity.

    NOTE: After sorting all entities by start index in ascending order, priority
    is given to the entity that comes first; a later entity is kept only if its
    start index is not before the end index of the last kept entity.

    Returns:
        list[dict]: updated list (overlapping entities removed)
    """
    new_ner_details = []
    ner_details = sorted(ner_details, key=lambda x: x["start"])
    for i, ner_detail in enumerate(ner_details):
        if i == 0:
            end = ner_detail["end"]
            new_ner_details.append(ner_detail)
            continue

        current_start = ner_detail["start"]
        current_end = ner_detail["end"]

        # skip entities that overlap with the last kept entity
        if current_start < end:
            continue

        # move the end boundary forward and keep this entity
        end = current_end
        new_ner_details.append(ner_detail)
    return new_ner_details


def get_ner_details(ann_file):
    with open(ann_file, "r") as f:
        lines = f.readlines()
    lines = [line.strip() for line in lines]
    csv_data = StringIO("\n".join(lines))
    df = pd.read_csv(csv_data, sep="\t", header=None)
    df.columns = ["EntityID", "EntityDetails", "EntityText"]

    # keep only rows whose entity-id starts with `T`
    df = df[df["EntityID"].apply(lambda x: str(x).strip().startswith("T"))]

    # remove the rows which contain ";" in `EntityDetails`
    df = df[df["EntityDetails"].apply(lambda x: ";" not in str(x))]

    # drop rows where None is present
    df.dropna(axis=0, inplace=True)

    ner_info = []
    for _, row in df.iterrows():
        text = row["EntityText"]
        details = row["EntityDetails"]
        try:
            ner_tag, start, end = details.split(" ")
        except ValueError:
            print(ann_file)
            print(details)
            continue
        start = int(float(start))
        end = int(float(end))
        ner_info.append({"text": text, "label": ner_tag.upper(), "start": start, "end": end})

    # remove the overlapping entities
    ner_info = remove_overlapped_ner_tags(ner_details=ner_info)
    return ner_info
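

# Illustrative only (this helper is an added documentation sketch, not part of the
# conversion pipeline, and is never called): it shows how `remove_overlapped_ner_tags`
# resolves overlaps. The label names and offsets below are made up.
def _demo_overlap_removal():
    spans = [
        {"text": "chest pain", "label": "SIGN_SYMPTOM", "start": 10, "end": 20},
        {"text": "pain", "label": "SIGN_SYMPTOM", "start": 16, "end": 20},
        {"text": "fever", "label": "SIGN_SYMPTOM", "start": 25, "end": 30},
    ]
    kept = remove_overlapped_ner_tags(ner_details=spans)
    # the nested "pain" span starts before "chest pain" ends, so only
    # "chest pain" and "fever" survive
    assert [s["text"] for s in kept] == ["chest pain", "fever"]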
def main(input_path: str = "data/MACCROBAT2020", output_path: str = "data/MACCROBAT2020-V2.json"):
    txt_files = glob.glob(os.path.join(input_path, "*.txt"))
    txt_files.sort()

    ner_data = {
        "data": [],
        "version": "MACCROBAT-V2 (https://figshare.com/articles/dataset/MACCROBAT2018/9764942)",
    }
    for txt_file in tqdm(txt_files, desc="Extracting data..."):
        with open(txt_file, "r") as f:
            full_text = f.read()
        ann_file = txt_file.replace(".txt", ".ann")
        ner_info = get_ner_details(ann_file)
        data = {"full_text": full_text, "ner_info": ner_info}
        ner_data["data"].append(data)

    ALL_NER_LABELS = set()
    for details in tqdm(ner_data["data"], desc="Splitting into tokens..."):
        text = details["full_text"]
        ner_details = details["ner_info"]
        tokens = []
        ner_labels = []
        start = 0
        for ner_detail in ner_details:
            ner_start = ner_detail["start"]
            ner_end = ner_detail["end"]

            # text between the previous entity and the current one is an "O" span
            before_ner_token = text[start:ner_start]
            ner_token = text[ner_start:ner_end]
            tokens.append(before_ner_token)
            ner_labels.append("O")

            tokens.append(ner_token)
            ner_labels.append(f'B-{ner_detail["label"]}')
            ALL_NER_LABELS.add(f'B-{ner_detail["label"]}')
            ALL_NER_LABELS.add(f'I-{ner_detail["label"]}')
            start = ner_end

        # remaining text after the last entity
        if len(text) > start:
            tokens.append(text[start:])
            ner_labels.append("O")

        assert len(tokens) == len(ner_labels)
        details["tokens"] = tokens
        details["ner_labels"] = ner_labels

    # put the "O" (outside) label first; without it, label_2_index would map
    # every "O" token to None in the step below
    ner_data["all_ner_labels"] = ["O"] + sorted(ALL_NER_LABELS, key=lambda x: x.split("-")[-1])
    label_2_index = {k: i for i, k in enumerate(ner_data["all_ner_labels"])}
    index_2_label = {v: k for k, v in label_2_index.items()}
    ner_data["label_2_index"] = label_2_index
    ner_data["index_2_label"] = index_2_label

    for details in tqdm(ner_data["data"], desc="label2index..."):
        ner_labels = details["ner_labels"]
        ner_labels_ids = [label_2_index.get(ner) for ner in ner_labels]
        details["ner_labels"] = ner_labels_ids

    with open(output_path, "w") as f:
        json.dump(ner_data, f, indent=4)


if __name__ == "__main__":
    input_path: str = "data/MACCROBAT2020"
    output_path: str = "data/MACCROBAT2020-V2.json"
    main(input_path=input_path, output_path=output_path)
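

# Illustrative only (an added sketch, never called by the script): a quick way to
# sanity-check the generated JSON after running `main`. The default path below
# assumes the `output_path` used above.
def _inspect_output(path: str = "data/MACCROBAT2020-V2.json"):
    with open(path, "r") as f:
        ner_data = json.load(f)
    print("documents:", len(ner_data["data"]))
    print("labels:", ner_data["all_ner_labels"][:5], "...")
    sample = ner_data["data"][0]
    # tokens and ner_labels are parallel lists; ner_labels holds label indices
    # (see `label_2_index`), so map them back for a readable preview
    index_2_label = {int(k): v for k, v in ner_data["index_2_label"].items()}
    for token, label_id in list(zip(sample["tokens"], sample["ner_labels"]))[:5]:
        print(repr(token[:40]), "->", index_2_label[label_id])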