|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Medical BIOS""" |
|
|
|
import json |
|
import os |
|
import textwrap |
|
|
|
import datasets |
|
|
|
|
|
MAIN_CITATION = """NA""" |
|
_DESCRIPTION = """NA""" |
|
MAIN_PATH = 'https://huggingface.co/datasets/coastalcph/medical-bios/resolve/main' |
|
|
|
|
|
class MedicalBIOSConfig(datasets.BuilderConfig):
    """BuilderConfig for Medical BIOS."""

    def __init__(
        self,
        label_classes,
        url,
        data_url,
        citation,
        **kwargs,
    ):
        """BuilderConfig for Medical BIOS.

        Args:
            label_classes: `list`, list of label classes (occupation names).
            url: `string`, url for information about the original project.
            data_url: `string`, url to download the zip archive from.
            citation: `string`, citation for the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        # Pin the config version; remaining kwargs (name, description, ...)
        # are handled by the base BuilderConfig.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.label_classes = label_classes
        self.url = url
        self.data_url = data_url
        self.citation = citation
|
|
|
|
|
class XAIFairness(datasets.GeneratorBasedBuilder):
    """Medical BIOS: English biographies labeled with medical occupations.

    A subset of the examples additionally carries human rationales and
    contrastive rationales (token-level binary highlights). Version 1.0.
    """

    BUILDER_CONFIGS = [
        MedicalBIOSConfig(
            name="standard",
            description=textwrap.dedent(
                """\
                The dataset is based on the Common Crawl. Specifically, De-Arteaga et al. identified online
                biographies, written in English, by filtering for lines that began
                with a name-like pattern (i.e., a sequence of two capitalized words)
                followed by the string “is a(n) (xxx) title,” where title is
                an occupation from the BLS Standard Occupation Classification system.
                This version of the dataset comprises English biographies labeled with occupations.
                We also include a subset of biographies labeled with human rationales.
                """
            ),
            label_classes=['psychologist', 'surgeon', 'nurse', 'dentist', 'physician'],
            data_url=os.path.join(MAIN_PATH, "bios.zip"),
            url="https://github.com/microsoft/biosbias",
            citation=textwrap.dedent(
                """\
                @inproceedings{10.1145/3287560.3287572,
                author = {De-Arteaga, Maria and Romanov, Alexey and Wallach, Hanna and Chayes,
                Jennifer and Borgs, Christian and Chouldechova, Alexandra and Geyik, Sahin
                and Kenthapadi, Krishnaram and Kalai, Adam Tauman},
                title = {Bias in Bios: A Case Study of Semantic Representation Bias in a High-Stakes Setting},
                year = {2019},
                isbn = {9781450361255},
                publisher = {Association for Computing Machinery},
                address = {New York, NY, USA},
                url = {https://doi.org/10.1145/3287560.3287572},
                doi = {10.1145/3287560.3287572},
                booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency},
                pages = {120–128},
                numpages = {9},
                location = {Atlanta, GA, USA},
                series = {FAT* '19}
                }"""
            ),
        ),
    ]

    def _info(self):
        """Return dataset metadata, including the feature schema."""
        features = {
            "text": datasets.Value("string"),
            "label": datasets.ClassLabel(names=self.config.label_classes),
            # Contrastive (foil) occupation label, drawn from the same class set.
            "foil": datasets.ClassLabel(names=self.config.label_classes),
            "words": datasets.Sequence(datasets.Value("string")),
            # FIX: "int" is not a valid Arrow dtype for datasets.Value and
            # would raise at load time; use "int32" for the 0/1 rationale masks.
            "rationales": datasets.Sequence(datasets.Value("int32")),
            "contrastive_rationales": datasets.Sequence(datasets.Value("int32")),
        }
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, and declare the four splits.

        `test-extra` is an additional test split without rationale annotations.
        """
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "validation.jsonl"),
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name="test-extra",
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test_extra.jsonl"),
                    "split": "test-extra",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from a JSON-lines file.

        Examples outside `test-extra` carry rationale annotations; `test-extra`
        examples get placeholder values so all splits share one schema.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                example = {
                    "text": data["text"],
                    # FIX: the config defines no `label_column` attribute, so
                    # `self.config.label_column` raised AttributeError; read the
                    # JSON field directly, like every other field below.
                    "label": data["label"],
                }
                if split != "test-extra":
                    example["foil"] = data["foil"]
                    example["words"] = data["words"]
                    example["rationales"] = data["rationales"]
                    example["contrastive_rationales"] = data["contrastive_rationales"]
                else:
                    # FIX: 'N/A' is not in the ClassLabel vocabulary and failed
                    # encoding; -1 is the ClassLabel convention for "missing".
                    example["foil"] = -1
                    example["words"] = ['N/A']
                    example["rationales"] = [0]
                    example["contrastive_rationales"] = [0]
                yield id_, example