|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Medical BIOS""" |
|
|
|
import json |
|
import os |
|
import textwrap |
|
|
|
import datasets |
|
|
|
|
|
# URL of the main paper; appended to each config's BibTeX citation in `_info`.
MAIN_CITATION = """https://aclanthology.org/2023.emnlp-main.427/"""

# Module-level description placeholder (per-config descriptions live on each BuilderConfig).
_DESCRIPTION = """NA"""

# Base URL of the Hugging Face dataset repository hosting the data archive.
MAIN_PATH = 'https://huggingface.co/datasets/coastalcph/medical-bios/resolve/main'
|
|
|
|
|
class MedicalBIOSConfig(datasets.BuilderConfig):
    """BuilderConfig for Medical BIOS."""

    def __init__(
        self,
        label_classes,
        url,
        data_url,
        citation,
        **kwargs,
    ):
        """BuilderConfig for Medical BIOS.

        Args:
            label_classes: `list`, list of label classes (occupation names)
            url: `string`, url for information about the original project
            data_url: `string`, url to download the zip file from
            citation: `string`, citation for the data set
            **kwargs: keyword arguments forwarded to super
                (e.g. `name`, `description`).
        """
        # Pin the dataset version; the remaining kwargs are handled by the
        # base BuilderConfig.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.label_classes = label_classes  # closed set of occupation labels
        self.url = url  # homepage of the original project
        self.data_url = data_url  # archive containing the jsonl splits
        self.citation = citation  # BibTeX entry for this configuration
|
|
|
|
|
class XAIFairness(datasets.GeneratorBasedBuilder):
    """Medical BIOS: English biographies labeled with medical occupations. Version 1.0

    Two configurations are exposed:
      * ``standard``   -- train/validation/test splits with text + occupation label.
      * ``rationales`` -- a test-only subset additionally annotated with human
        (contrastive) rationales at the token level.
    """

    # Both configurations share the same description, label set and citation;
    # defining them once here keeps the two configs from drifting out of sync.
    _BIOS_DESCRIPTION = textwrap.dedent(
        """\
        The dataset is based on the Common Crawl. Specifically, De-Arteaga et al. identified online
        biographies, written in English, by filtering for lines that began
        with a name-like pattern (i.e., a sequence of two capitalized words)
        followed by the string “is a(n) (xxx) title,” where title is
        an occupation from the BLS Standard Occupation Classification system.
        This version of the dataset comprises English biographies labeled with occupations.
        We also include a subset of biographies labeled with human rationales.
        """
    )

    _BIOS_CITATION = textwrap.dedent(
        """\
        @inproceedings{eberle-etal-2023-rather,
        title = "Rather a Nurse than a Physician - Contrastive Explanations under Investigation",
        author = "Eberle, Oliver and
        Chalkidis, Ilias and
        Cabello, Laura and
        Brandl, Stephanie",
        booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
        year = "2023",
        publisher = "Association for Computational Linguistics",
        url = "https://aclanthology.org/2023.emnlp-main.427",
        }"""
    )

    _LABEL_CLASSES = ['psychologist', 'surgeon', 'nurse', 'dentist', 'physician']

    BUILDER_CONFIGS = [
        MedicalBIOSConfig(
            name="standard",
            description=_BIOS_DESCRIPTION,
            label_classes=_LABEL_CLASSES,
            # Build the download URL with explicit '/' concatenation:
            # os.path.join would produce backslashes on Windows.
            data_url=MAIN_PATH + "/bios.zip",
            url="https://github.com/microsoft/biosbias",
            citation=_BIOS_CITATION,
        ),
        MedicalBIOSConfig(
            name="rationales",
            description=_BIOS_DESCRIPTION,
            label_classes=_LABEL_CLASSES,
            data_url=MAIN_PATH + "/bios.zip",
            url="https://github.com/microsoft/biosbias",
            citation=_BIOS_CITATION,
        ),
    ]

    def _info(self):
        """Return the DatasetInfo for the active config.

        The `rationales` config extends the `standard` text/label schema with
        token-level rationale fields.
        """
        features = {
            "text": datasets.Value("string"),
            "label": datasets.ClassLabel(names=self.config.label_classes),
        }
        if self.config.name != "standard":
            features.update({
                # Contrastive (foil) label drawn from the same label set.
                "foil": datasets.ClassLabel(names=self.config.label_classes),
                "words": datasets.Sequence(datasets.Value("string")),
                # Binary (0/1) masks aligned with `words`.
                "rationales": datasets.Sequence(datasets.Value("int8")),
                "contrastive_rationales": datasets.Sequence(datasets.Value("int8")),
                # Per-annotator masks: one inner sequence per annotator.
                "annotations": datasets.Sequence(datasets.Sequence(datasets.Value("int8"))),
                "contrastive_annotations": datasets.Sequence(datasets.Sequence(datasets.Value("int8"))),
            })
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the splits for the config."""
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        if self.config.name == 'standard':
            splits = [
                (datasets.Split.TRAIN, "train.jsonl", "train"),
                (datasets.Split.TEST, "test.jsonl", "test"),
                (datasets.Split.VALIDATION, "validation.jsonl", "val"),
            ]
        else:
            # Human rationales are only available for the test subset.
            splits = [(datasets.Split.TEST, "test_rationales.jsonl", "test")]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, filename),
                    "split": split,
                },
            )
            for split_name, filename, split in splits
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from a jsonl file (one record per line)."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                example = {
                    "text": data["text"],
                    # The occupation label is stored under "title" in the raw data.
                    "label": data["title"],
                }
                if self.config.name == "rationales":
                    # Rationale fields only exist in the annotated test subset.
                    for key in (
                        "foil",
                        "words",
                        "rationales",
                        "contrastive_rationales",
                        "annotations",
                        "contrastive_annotations",
                    ):
                        example[key] = data[key]
                yield id_, example