import json
import os
import xml.etree.ElementTree as ET
from dataclasses import dataclass
from typing import List

import datasets

from .bigbiohub import text_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ["English"]
_PUBMED = True
_LOCAL = True

_CITATION = """\ |
|
@article{nentidis-etal-2017-results, |
|
title = {Results of the fifth edition of the {B}io{ASQ} Challenge}, |
|
author = { |
|
Nentidis, Anastasios and Bougiatiotis, Konstantinos and Krithara, |
|
Anastasia and Paliouras, Georgios and Kakadiaris, Ioannis |
|
}, |
|
year = 2007, |
|
journal = {}, |
|
volume = {BioNLP 2017}, |
|
doi = {10.18653/v1/W17-2306}, |
|
url = {https://aclanthology.org/W17-2306}, |
|
biburl = {}, |
|
bibsource = {https://aclanthology.org/W17-2306} |
|
} |
|
|
|
""" |
|
|
|
_DATASETNAME = "bioasq_task_c_2017"
_DISPLAYNAME = "BioASQ Task C 2017"

_DESCRIPTION = """\
The training data set for this task contains annotated biomedical articles
published in PubMed, together with the corresponding full text from PMC.
"Annotated" means that Grant IDs and the corresponding Grant Agencies have been
identified in the full text of the articles.
"""

_HOMEPAGE = "http://participants-area.bioasq.org/general_information/Task5c/"

_LICENSE = "National Library of Medicine Terms and Conditions"

_URLS = {_DATASETNAME: "http://participants-area.bioasq.org/datasets/"}

_SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"

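# Usage sketch (illustrative only): the Task 5c archives have to be downloaded
# manually from the BioASQ participants area, so the directory used below is a
# placeholder path, not something this loader provides. Assuming the JSON
# annotation files and the Train_Text/Final_Text folders have been extracted
# into a single directory, the loader can be invoked as:
#
#   import datasets
#
#   ds = datasets.load_dataset(
#       "bioasq_task_c_2017.py",
#       name="bioasq_task_c_2017_source",
#       data_dir="/path/to/bioasq_task_c_2017",
#   )
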

@dataclass
class BioASQTaskC2017BigBioConfig(BigBioConfig):
    """BigBio configuration for the BioASQ Task C 2017 loader."""

    schema: str = "source"
    name: str = "bioasq_task_c_2017_source"
    version: datasets.Version = datasets.Version(_SOURCE_VERSION)
    description: str = "bioasq_task_c_2017 source schema"
    subset_id: str = "bioasq_task_c_2017"


class BioASQTaskC2017(datasets.GeneratorBasedBuilder):
    """BioASQ Task C Dataset for 2017."""

    DEFAULT_CONFIG_NAME = "bioasq_task_c_2017_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BioASQTaskC2017BigBioConfig(
            name="bioasq_task_c_2017_source",
            version=SOURCE_VERSION,
            description="bioasq_task_c_2017 source schema",
            schema="source",
            subset_id="bioasq_task_c_2017",
        ),
        BioASQTaskC2017BigBioConfig(
            name="bioasq_task_c_2017_bigbio_text",
            version=BIGBIO_VERSION,
            description="bioasq_task_c_2017 BigBio schema",
            schema="bigbio_text",
            subset_id="bioasq_task_c_2017",
        ),
    ]

    BUILDER_CONFIG_CLASS = BioASQTaskC2017BigBioConfig

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            # Source schema: article identifiers, the raw full-text XML, and the
            # funding agency of each grant listed for the article.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "pmcid": datasets.Value("string"),
                    "grantList": [
                        {
                            "agency": datasets.Value("string"),
                        }
                    ],
                    "text": datasets.Value("string"),
                }
            )

        elif self.config.schema == "bigbio_text":
            # Shared BigBio text-classification schema.
            features = text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

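    # Expected layout of `data_dir`, inferred from the file names used below
    # (the archives from the BioASQ participants area are assumed to have been
    # extracted without renaming):
    #
    #   data_dir/
    #       taskCTrainingData2017.json   # training annotations
    #       Train_Text/<pmcid>.xml       # full text for the training articles
    #       taskc_golden2.json           # test (golden) annotations
    #       Final_Text/<pmcid>.xml       # full text for the test articles
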
    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:

        if self.config.data_dir is None:
            raise ValueError(
                "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
            )
        else:
            data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "taskCTrainingData2017.json"),
                    "filespath": os.path.join(data_dir, "Train_Text"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "taskc_golden2.json"),
                    "filespath": os.path.join(data_dir, "Final_Text"),
                    "split": "test",
                },
            ),
        ]

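    # Shape of the annotation JSON consumed below (only the keys accessed here
    # are listed; the distributed files may carry additional fields, such as the
    # actual grant IDs, which this loader does not use):
    #
    #   {"articles": [{"pmid": "...",
    #                  "pmcid": "...",
    #                  "grantList": [{"agency": "..."}, ...]},
    #                 ...]}
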
    def _generate_examples(self, filepath, filespath, split):

        with open(filepath) as f:
            task_data = json.load(f)

        if self.config.schema == "source":
            for article in task_data["articles"]:
                # The full text of each article is stored as <pmcid>.xml in the
                # text directory for this split.
                with open(os.path.join(filespath, article["pmcid"] + ".xml")) as f:
                    text = f.read()
                pmid = article["pmid"]

                yield pmid, {
                    "text": text,
                    "document_id": pmid,
                    "id": str(pmid),
                    "pmid": pmid,
                    "pmcid": article["pmcid"],
                    "grantList": [
                        {"agency": grant["agency"]} for grant in article["grantList"]
                    ],
                }

        elif self.config.schema == "bigbio_text":
            for article in task_data["articles"]:

                with open(os.path.join(filespath, article["pmcid"] + ".xml")) as f:
                    xml_string = f.read()

                try:
                    article_body = ET.fromstring(xml_string).find("./article/body")
                except ET.ParseError:
                    # Some of the distributed files close the <pmc-articleset>
                    # element before the article content. Turn that premature
                    # closing tag into a fresh <article> opening tag and close
                    # both elements at the end so ElementTree can parse the file.
                    xml_string = xml_string.replace(
                        "</pmc-articleset>",
                        '<article xmlns:xlink="http://www.w3.org/1999/xlink"'
                        ' xmlns:mml="http://www.w3.org/1998/Math/MathML"'
                        ' article-type="research-article">',
                    )
                    xml_string = xml_string + "</article></pmc-articleset>"
                    article_body = ET.fromstring(xml_string).find("./article/body")

                # method="text" drops the markup; encoding="unicode" makes
                # tostring() return a str rather than bytes, matching the
                # string feature declared in the schema.
                text = ET.tostring(article_body, encoding="unicode", method="text")

                yield article["pmid"], {
                    "text": text,
                    "id": str(article["pmid"]),
                    "document_id": article["pmid"],
                    "labels": [grant["agency"] for grant in article["grantList"]],
                }