import os
import random
import re
import xml.etree.ElementTree as ET
from typing import List, Tuple

import datasets

_CITATION = """\
@article{kim2003genia,
  title   = {GENIA corpus -- a semantically annotated corpus for bio-textmining},
  author  = {Kim, Jin-Dong and Ohta, Tomoko and Tateisi, Yuka and Tsujii, Jun'ichi},
  journal = {Bioinformatics},
  volume  = {19},
  number  = {suppl_1},
  pages   = {i180--i182},
  year    = {2003}
}
"""

_DESCRIPTION = """\
GENIA Term corpus: MEDLINE abstracts collected by the GENIA project, annotated with
biological term (named entity) classes from the GENIA ontology and converted here to
token-level BIO tags.
"""

_HOMEPAGE = "http://www.geniaproject.org/genia-corpus/term-corpus"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# The HuggingFace Datasets library doesn't host the dataset; it only points to the original archive.
_URLS = "http://www.nactem.ac.uk/GENIA/current/GENIA-corpus/Term/GENIAcorpus3.02.tgz"


def _split_files(data_dir):
    """Split the single GENIA XML file into train/dev/test XML files on disk."""
    root = ET.parse(os.path.join(data_dir, "GENIA_term_3.02", "GENIAcorpus3.02.xml")).getroot()
    articles = root.findall(".//article")
    train_root = ET.Element("set")
    dev_root = ET.Element("set")
    test_root = ET.Element("set")
    for a in articles:
        root.remove(a)
    # Seeded shuffle so the train/dev/test split is reproducible across runs.
    random.Random(42).shuffle(articles)
    for a in articles[:1600]:
        train_root.append(a)
    for a in articles[1600:1800]:
        dev_root.append(a)
    for a in articles[1800:]:
        test_root.append(a)
    ET.ElementTree(train_root).write(os.path.join(data_dir, "train.xml"))
    ET.ElementTree(dev_root).write(os.path.join(data_dir, "dev.xml"))
    ET.ElementTree(test_root).write(os.path.join(data_dir, "test.xml"))


class GENIATermCorpus(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("0.9.0")

    # Punctuation that the "hacky tokenizer" splits off as separate tokens.
    pattern = re.compile(r"[,\.;:\[\]\(\)]")

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "folded_tokens": datasets.Sequence(datasets.Value("string")),
                "labels": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        # Split the dataset files into train/dev/test
        _split_files(data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.xml"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.xml"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.xml"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: str, split):
        root = ET.parse(filepath)
        articles = root.findall(".//article")
        for idx, article in enumerate(articles):
            article_id, data = self.parse_article(article)
            for sen_ix, (tokens, entities) in enumerate(data):
                yield f"{split}_{idx}_{sen_ix}", {
                    "tokens": tokens,
                    "folded_tokens": [t.lower() for t in tokens],
                    "labels": entities,
                }

    def parse_article(self, article: ET.Element):
        # Get the id of the article
        article_id = article.find("./articleinfo/bibliomisc").text
        # Select all sentences in the article element
        sentences = article.findall(".//sentence")
        data = []
        for sentence in sentences:
            data.append(self.build_bio_tags(*self.flatten_tree(sentence)))
        return article_id, data

    def build_bio_tags(self, text_segments: List[str], entities: List[str]) -> Tuple[List[str], List[str]]:
        # Hacky tokenizer: split off punctuation, then split on whitespace.
        tokens, tags = [], []
        for seg, entity in zip(text_segments, entities):
            # Insert whitespace around punctuation and strip leading/trailing whitespace
            seg = self.pattern.sub(r" \g<0> ", seg).strip()
            t = seg.split()
            tokens.extend(t)
            tags.extend(
                [f"B-{entity}"] + [f"I-{entity}"] * (len(t) - 1)
                if entity != "O"
                else ["O"] * len(t)
            )
        return tokens, tags

    def flatten_tree(self, elem: ET.Element) -> Tuple[List[str], List[str]]:
        # Keep only the simple (non-nested) annotations: each text segment gets the
        # semantic class of its directly enclosing <cons> element, or "O" otherwise.
        text_segments, entities = [], []
        if elem.text:
            text_segments.append(elem.text)
            if elem.tag == "cons" and "sem" in elem.attrib:
                tag = elem.attrib["sem"].replace("G#", "")
            else:
                tag = "O"
            entities.append(tag)
        for child in elem:
            c_segments, c_entities = self.flatten_tree(child)
            text_segments.extend(c_segments)
            entities.extend(c_entities)
        if elem.tail and elem.tail != "\n":
            text_segments.append(elem.tail)
            entities.append("O")
        return text_segments, entities
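

# Minimal usage sketch (an assumption, not part of the loader itself): the file name
# "genia_term_corpus.py" is hypothetical -- pass whatever path this script is saved
# under. Depending on the installed `datasets` version, running a local loading
# script may additionally require `trust_remote_code=True`.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("genia_term_corpus.py")
    example = ds["train"][0]
    print(example["tokens"][:10])
    print(example["folded_tokens"][:10])
    print(example["labels"][:10])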