import os
import re
from typing import Dict, Iterator, List, Tuple

import bioc
import datasets
from bioc import biocxml

from .bigbiohub import kb_features
from .bigbiohub import text_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import get_texts_and_offsets_from_bioc_ann

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@Article{islamaj2021nlm,
  title={NLM-Chem, a new resource for chemical entity recognition in PubMed full text literature},
  author={Islamaj, Rezarta and Leaman, Robert and Kim, Sun and Kwon, Dongseop and Wei, Chih-Hsuan and Comeau, Donald C and Peng, Yifan and Cissel, David and Coss, Cathleen and Fisher, Carol and others},
  journal={Scientific Data},
  volume={8},
  number={1},
  pages={1--12},
  year={2021},
  publisher={Nature Publishing Group}
}
"""

_DATASETNAME = "nlmchem"
_DISPLAYNAME = "NLM-Chem"

_DESCRIPTION = """\
The NLM-Chem corpus consists of 150 full-text articles from the PubMed Central Open Access dataset,
spanning 67 different chemical journals, aiming to cover a general distribution of usage of chemical
names in the biomedical literature.
Articles were selected so that human annotation was most valuable (meaning that they were rich in bio-entities
and current state-of-the-art named entity recognition systems disagreed on bio-entity recognition).
"""

_HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-2"
_LICENSE = 'Creative Commons Zero v1.0 Universal'

# All three schemas are built from the same BioC XML archive.
_URLs = {
    "source": "https://ftp.ncbi.nlm.nih.gov/pub/lu/BC7-NLM-Chem-track/BC7T2-NLMChem-corpus_v2.BioC.xml.gz",
    "bigbio_kb": "https://ftp.ncbi.nlm.nih.gov/pub/lu/BC7-NLM-Chem-track/BC7T2-NLMChem-corpus_v2.BioC.xml.gz",
    "bigbio_text": "https://ftp.ncbi.nlm.nih.gov/pub/lu/BC7-NLM-Chem-track/BC7T2-NLMChem-corpus_v2.BioC.xml.gz",
}
_SUPPORTED_TASKS = [
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.NAMED_ENTITY_DISAMBIGUATION,
    Tasks.TEXT_CLASSIFICATION,
]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class NLMChemDataset(datasets.GeneratorBasedBuilder):
    """NLMChem"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="nlmchem_source",
            version=SOURCE_VERSION,
            description="NLM_Chem source schema",
            schema="source",
            subset_id="nlmchem",
        ),
        BigBioConfig(
            name="nlmchem_bigbio_kb",
            version=BIGBIO_VERSION,
            description="NLM_Chem BigBio schema (KB)",
            schema="bigbio_kb",
            subset_id="nlmchem",
        ),
        BigBioConfig(
            name="nlmchem_bigbio_text",
            version=BIGBIO_VERSION,
            description="NLM_Chem BigBio schema (TEXT)",
            schema="bigbio_text",
            subset_id="nlmchem",
        ),
    ]

    DEFAULT_CONFIG_NAME = "nlmchem_source"
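
    # Usage sketch (not part of the original script): assuming this loader and
    # its ``bigbiohub`` helpers sit together in a local directory, any of the
    # three configs can be loaded by name through the standard ``datasets``
    # API, e.g.:
    #
    #     import datasets
    #
    #     dataset = datasets.load_dataset("path/to/nlmchem.py", name="nlmchem_bigbio_kb")
    #     print(dataset["train"][0]["document_id"])
    #
    # ``nlmchem_source`` keeps the BioC-derived passage structure,
    # ``nlmchem_bigbio_kb`` targets NER/NED, and ``nlmchem_bigbio_text``
    # exposes MeSH chemical indexing as text classification.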

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "passages": [
                        {
                            "document_id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "offset": datasets.Value("int32"),
                            "entities": [
                                {
                                    "id": datasets.Value("string"),
                                    "offsets": [[datasets.Value("int32")]],
                                    "text": [datasets.Value("string")],
                                    "type": datasets.Value("string"),
                                    "normalized": [
                                        {
                                            "db_name": datasets.Value("string"),
                                            "db_id": datasets.Value("string"),
                                        }
                                    ],
                                }
                            ],
                        }
                    ]
                }
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features
        elif self.config.schema == "bigbio_text":
            features = text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.schema]
        # The downloaded archive extracts to one BioC XML file per split.
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "BC7T2-NLMChem-corpus-train.BioC.xml"
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "BC7T2-NLMChem-corpus-test.BioC.xml"
                    ),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "BC7T2-NLMChem-corpus-dev.BioC.xml"
                    ),
                    "split": "dev",
                },
            ),
        ]

    def _get_textcls_example(self, d: bioc.BioCDocument) -> Dict:
        # Build a text-classification example: the full document text plus the
        # MeSH chemical indexing identifiers attached at passage level.
        example = {"document_id": d.id, "text": [], "labels": []}

        for p in d.passages:
            example["text"].append(p.text)
            for a in p.annotations:
                if a.infons.get("type") == "MeSH_Indexing_Chemical":
                    example["labels"].append(a.infons.get("identifier"))

        example["text"] = " ".join(example["text"])

        return example
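
    # Shape of the text-classification example, for illustration (the id and
    # labels here are invented placeholders, not taken from the corpus):
    #
    #     {
    #         "document_id": "PMC0000000",
    #         "text": "Title of the article. Abstract and body text ...",
    #         "labels": ["MESH:D000000", "MESH:D000001"],
    #     }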

    def _get_passages_and_entities(
        self, d: bioc.BioCDocument
    ) -> Tuple[List[Dict], List[List[Dict]]]:
        passages: List[Dict] = []
        entities: List[List[Dict]] = []

        text_total_length = 0
        po_start = 0

        for p in d.passages:
            # Skip passages without text before calling len(p.text); checking
            # afterwards would raise a TypeError on None.
            if p.text is None:
                continue

            # Offset correction: difference between the passage's
            # document-level BioC offset and its position in the
            # space-joined passage text.
            eo = p.offset - text_total_length

            text_total_length += len(p.text) + 1
            po_end = po_start + len(p.text)

            dp = {
                "text": p.text,
                "type": p.infons.get("type"),
                "offsets": [(po_start, po_end)],
                "offset": p.offset,  # original document-level offset
            }

            po_start = po_end + 1

            passages.append(dp)

            pe = []

            for a in p.annotations:
                a_type = a.infons.get("type")

                # MeSH indexing terms are document-level labels, not in-text
                # mentions, so they are excluded from the KB schema.
                if (
                    self.config.schema == "bigbio_kb"
                    and a_type == "MeSH_Indexing_Chemical"
                ):
                    continue

                offsets, text = get_texts_and_offsets_from_bioc_ann(a)

                da = {
                    "type": a_type,
                    "offsets": [(start - eo, end - eo) for (start, end) in offsets],
                    "text": text,
                    "id": a.id,
                    "normalized": self._get_normalized(a),
                }

                pe.append(da)

            entities.append(pe)

        return passages, entities
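
    # Worked offset example with assumed numbers: if the first passage has
    # ``offset == 0`` and a text of length 100, then ``text_total_length`` is
    # 101 after processing it. If the second passage's BioC ``offset`` is 150,
    # then ``eo = 150 - 101 = 49``: BioC offsets are document/XML-level and may
    # skip characters, so subtracting ``eo`` moves each annotation offset into
    # the coordinates of the passage texts joined by single spaces (an
    # annotation at BioC offset 160 lands at 160 - 49 = 111, i.e. 10 characters
    # into the second passage).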

    def _get_normalized(self, a: bioc.BioCAnnotation) -> List[Dict]:
        """
        Get normalization DB and ID from annotation identifiers
        """
        identifiers = a.infons.get("identifier")

        if identifiers is not None:
            # Identifiers may be a ","- or ";"-separated list; a bare "-"
            # marks an unnormalized mention and is dropped.
            identifiers = re.split(r",|;", identifiers)
            identifiers = [i for i in identifiers if i != "-"]
            # Each remaining identifier has the form "<db_name>:<db_id>".
            normalized = [i.split(":") for i in identifiers]
            normalized = [
                {"db_name": elems[0], "db_id": elems[1]} for elems in normalized
            ]
        else:
            # No identifier infon at all: emit a placeholder entry.
            normalized = [{"db_name": "-1", "db_id": "-1"}]

        return normalized
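
    # Example behavior (identifier strings follow the "<db>:<id>" convention
    # the code assumes; the IDs are illustrative): "MESH:D012345,MESH:D054321"
    # becomes
    #
    #     [{"db_name": "MESH", "db_id": "D012345"},
    #      {"db_name": "MESH", "db_id": "D054321"}]
    #
    # A lone "-" (an unnormalized mention) yields an empty list, while a
    # missing ``identifier`` infon yields the placeholder
    # ``[{"db_name": "-1", "db_id": "-1"}]``.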

    def _generate_examples(
        self,
        filepath: str,
        split: str,
    ) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples."""
        reader = biocxml.BioCXMLDocumentReader(str(filepath))

        if self.config.schema == "source":
            for uid, doc in enumerate(reader):
                passages, passages_entities = self._get_passages_and_entities(doc)

                for p, pe in zip(passages, passages_entities):
                    p.pop("offsets")  # BigBio-only field, not in the source schema
                    p["document_id"] = doc.id
                    p["entities"] = pe

                yield uid, {"passages": passages}

        elif self.config.schema == "bigbio_text":
            uid = 0
            for idx, doc in enumerate(reader):
                example = self._get_textcls_example(doc)
                example["id"] = uid
                uid += 1
                yield idx, example

        elif self.config.schema == "bigbio_kb":
            uid = 0  # global unique id within the split
            for idx, doc in enumerate(reader):
                uid += 1

                passages, passages_entities = self._get_passages_and_entities(doc)

                # Flatten the per-passage entity lists into one list per document.
                entities = [e for pe in passages_entities for e in pe]

                for p in passages:
                    p.pop("offset")  # source-only field, not in the KB schema
                    p["text"] = (p["text"],)  # KB schema expects a sequence of texts
                    p["id"] = uid
                    uid += 1

                for e in entities:
                    e["id"] = uid
                    uid += 1

                # The document takes the next free uid; the loop bumps uid
                # again before assigning passage ids for the next document.
                yield idx, {
                    "id": uid,
                    "document_id": doc.id,
                    "passages": passages,
                    "entities": entities,
                    "events": [],
                    "coreferences": [],
                    "relations": [],
                }