# pico-breast-cancer / pico-breast-cancer.py
# Uploaded by Stardrums — "Update pico-breast-cancer.py" (commit 5a8f589, 7.52 kB).
# NOTE(review): the header above is Hugging Face file-viewer chrome that was pasted
# into the source; it is preserved here as comments so the file parses as valid Python.
import datasets

# Module-level logger, following the datasets-library convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the corpus paper (Mutinda et al., WIESP 2022).
_CITATION = """\
@InProceedings{mutinda2022pico,
title = {PICO Corpus: A Publicly Available Corpus to Support Automatic Data Extraction from Biomedical Literature},
author = {Mutinda, Faith and Liew, Kongmeng and Yada, Shuntaro and Wakamiya, Shoko and Aramaki, Eiji},
booktitle = {Proceedings of the first Workshop on Information Extraction from Scientific Publications},
pages = {26--31},
year = {2022}
}
"""

# Human-readable dataset description surfaced through DatasetInfo.
_DESCRIPTION = """\
The corpus consists of about 1,011 PubMed abstracts which are RCTs related
to breast cancer. For each abstract, text snippets that identify the
Participants, Intervention, Control, and Outcome (PICO elements) are annotated.
The abstracts were annotated using BRAT (https://brat.nlplab.org/) and later
converted to IOB format.
"""

# Base URL of the raw IOB-formatted data files on GitHub.
_URL = "https://raw.githubusercontent.com/Martin-Masson/pico-breast-cancer/main/pico_iob/"
# File names appended to _URL; which ones are fetched depends on the builder config.
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"
_TEST_FILE = "test.txt"
_ALL_FILE = "all.txt"
_SHORT_FILE = "all_short.txt"
class PicoBreastCancerConfig(datasets.BuilderConfig):
    """Configuration object for the PicoBreastCancer dataset builder."""

    def __init__(self, **kwargs):
        """Create a new config.

        Args:
            **kwargs: passed straight through to ``datasets.BuilderConfig``
                (name, version, description, ...).
        """
        super().__init__(**kwargs)
class PicoBreastCancer(datasets.GeneratorBasedBuilder):
    """A corpus of about 1,011 PubMed abstracts from RCTs related to breast cancer.

    Three configs are exposed:
      * ``split`` (default): 80/10/10 train/validation/test splits.
      * ``all``: every abstract in a single TRAIN split, for custom splitting.
      * ``short``: like ``all`` but with shortened examples.
    """

    BUILDER_CONFIGS = [
        PicoBreastCancerConfig(
            name="split",
            version=datasets.Version("1.0.0"),
            description="Train/Validation/Test splits containing respectively 80%/10%/10% of all the abstracts.",
        ),
        PicoBreastCancerConfig(
            name="all",
            version=datasets.Version("1.0.0"),
            # Typo fix: "absracts" -> "abstracts".
            description="Single dataset containing all the abstracts for custom splitting.",
        ),
        PicoBreastCancerConfig(
            name="short",
            version=datasets.Version("1.0.0"),
            description="Similar to 'all' but with shortened examples to account for embeddings size.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "split"

    def _info(self):
        """Return the dataset metadata: features schema, description, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # IOB2 tag set for the PICO entity types.
                    # NOTE(review): "ethinicity" is presumably the spelling used in the
                    # source IOB files — do not "correct" it here, or the string tags
                    # read in _generate_examples will no longer map to a ClassLabel.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-total-participants",
                                "I-total-participants",
                                "B-intervention-participants",
                                "I-intervention-participants",
                                "B-control-participants",
                                "I-control-participants",
                                "B-age",
                                "I-age",
                                "B-eligibility",
                                "I-eligibility",
                                "B-ethinicity",
                                "I-ethinicity",
                                "B-condition",
                                "I-condition",
                                "B-location",
                                "I-location",
                                "B-intervention",
                                "I-intervention",
                                "B-control",
                                "I-control",
                                "B-outcome",
                                "I-outcome",
                                "B-outcome-measure",
                                "I-outcome-measure",
                                "B-iv-bin-abs",
                                "I-iv-bin-abs",
                                "B-cv-bin-abs",
                                "I-cv-bin-abs",
                                "B-iv-bin-percent",
                                "I-iv-bin-percent",
                                "B-cv-bin-percent",
                                "I-cv-bin-percent",
                                "B-iv-cont-mean",
                                "I-iv-cont-mean",
                                "B-cv-cont-mean",
                                "I-cv-cont-mean",
                                "B-iv-cont-median",
                                "I-iv-cont-median",
                                "B-cv-cont-median",
                                "I-cv-cont-median",
                                "B-iv-cont-sd",
                                "I-iv-cont-sd",
                                "B-cv-cont-sd",
                                "I-cv-cont-sd",
                                "B-iv-cont-q1",
                                "I-iv-cont-q1",
                                "B-cv-cont-q1",
                                "I-cv-cont-q1",
                                "B-iv-cont-q3",
                                "I-iv-cont-q3",
                                "B-cv-cont-q3",
                                "I-cv-cont-q3",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/Martin-Masson/pico-corpus",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators for the active config.

        Only the files the active config actually uses are downloaded
        (the original implementation fetched all five files every time).
        """
        if self.config.name == "split":
            downloaded_files = dl_manager.download_and_extract(
                {
                    "train": f"{_URL}{_TRAINING_FILE}",
                    "dev": f"{_URL}{_DEV_FILE}",
                    "test": f"{_URL}{_TEST_FILE}",
                }
            )
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            ]
        # "all" and "short" expose a single TRAIN split over one file.
        data_file = _ALL_FILE if self.config.name == "all" else _SHORT_FILE
        downloaded_file = dl_manager.download_and_extract(f"{_URL}{data_file}")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from an IOB file.

        The file format is one ``<token> <tag>`` pair per line, with blank
        lines separating abstracts. Tokens may themselves contain spaces,
        hence the ``rsplit(" ", 1)`` — only the last field is the tag.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            idx = 0
            tokens = []
            ner_tags = []
            for line in f.read().splitlines():
                if not line:
                    # Blank line: end of the current abstract.
                    yield idx, {
                        "id": str(idx),
                        "tokens": tokens,
                        "ner_tags": ner_tags,
                    }
                    idx += 1
                    tokens = []
                    ner_tags = []
                else:
                    splits = line.rstrip().rsplit(" ", 1)
                    tokens.append(splits[0])
                    ner_tags.append(splits[1])
            # Emit the trailing example when the file has no final blank line.
            if tokens:
                yield idx, {
                    "id": str(idx),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }