Datasets:

Languages:
English
ArXiv:
License:
kiddothe2b committed on
Commit
5f93979
1 Parent(s): 6ea52b4

Create medical-bios.py

Browse files
Files changed (1) hide show
  1. medical-bios.py +166 -0
medical-bios.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Medical BIOS"""
16
+
17
+ import json
18
+ import os
19
+ import textwrap
20
+
21
+ import datasets
22
+
23
+
24
# Citation and description are placeholders in the original script ("NA").
MAIN_CITATION = """NA"""
_DESCRIPTION = """NA"""
# Base URL of the dataset archives hosted on the Hugging Face Hub.
MAIN_PATH = 'https://huggingface.co/datasets/coastalcph/medical-bios/resolve/main'
27
+
28
+
29
class MedicalBIOSConfig(datasets.BuilderConfig):
    """BuilderConfig for Medical BIOS."""

    def __init__(
        self,
        url,
        data_url,
        citation,
        label_classes=None,
        label_column="label",
        **kwargs,
    ):
        """BuilderConfig for Medical BIOS.

        Args:
            url: `string`, url for information about the data set
            data_url: `string`, url to download the zip file from
            citation: `string`, citation for the data set
            label_classes: `list` of `string`, names of the occupation labels
                (used by `_info` to build the `ClassLabel` features)
            label_column: `string`, key in each JSONL record that holds the
                gold label (used by `_generate_examples`)
            **kwargs: keyword arguments forwarded to super.
        """
        # `label_classes` / `label_column` must be consumed here: the parent
        # datasets.BuilderConfig.__init__ does not accept them, so forwarding
        # them via **kwargs (as the builder's BUILDER_CONFIGS would otherwise
        # do with label_classes=...) raises TypeError at import time.
        super(MedicalBIOSConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.url = url
        self.data_url = data_url
        self.citation = citation
        self.label_classes = label_classes
        self.label_column = label_column
52
+
53
+
54
class XAIFairness(datasets.GeneratorBasedBuilder):
    """Medical BIOS: English occupation biographies (medical professions) with a
    subset annotated with human rationales. Version 1.0"""

    BUILDER_CONFIGS = [
        MedicalBIOSConfig(
            name="standard",
            description=textwrap.dedent(
                """\
                The dataset is based on the Common Crawl. Specifically, De-Arteaga et al. identified online
                biographies, written in English, by filtering for lines that began
                with a name-like pattern (i.e., a sequence of two capitalized words)
                followed by the string “is a(n) (xxx) title,” where title is
                an occupation from the BLS Standard Occupation Classification system.
                This version of the dataset comprises English biographies labeled with occupations.
                We also include a subset of biographies labeled with human rationales.
                """
            ),
            label_classes=['psychologist', 'surgeon', 'nurse', 'dentist', 'physician'],
            data_url=os.path.join(MAIN_PATH, "bios.zip"),
            url="https://github.com/microsoft/biosbias",
            citation=textwrap.dedent(
                """\
                @inproceedings{10.1145/3287560.3287572,
                author = {De-Arteaga, Maria and Romanov, Alexey and Wallach, Hanna and Chayes,
                Jennifer and Borgs, Christian and Chouldechova, Alexandra and Geyik, Sahin
                and Kenthapadi, Krishnaram and Kalai, Adam Tauman},
                title = {Bias in Bios: A Case Study of Semantic Representation Bias in a High-Stakes Setting},
                year = {2019},
                isbn = {9781450361255},
                publisher = {Association for Computing Machinery},
                address = {New York, NY, USA},
                url = {https://doi.org/10.1145/3287560.3287572},
                doi = {10.1145/3287560.3287572},
                booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency},
                pages = {120–128},
                numpages = {9},
                location = {Atlanta, GA, USA},
                series = {FAT* '19}
                }"""
            ),
        ),
    ]

    def _info(self):
        """Return the DatasetInfo (feature schema, homepage, citation) for this config."""
        features = {
            "text": datasets.Value("string"),
            "label": datasets.ClassLabel(names=self.config.label_classes),
            "foil": datasets.ClassLabel(names=self.config.label_classes),
            "words": datasets.Sequence(datasets.Value("string")),
            # "int" is not a valid `datasets` dtype (must be int8/16/32/64);
            # int32 is sufficient for the per-word rationale indicators.
            "rationales": datasets.Sequence(datasets.Value("int32")),
            "contrastive_rationales": datasets.Sequence(datasets.Value("int32")),
        }
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + MAIN_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the data archive and declare the four splits."""
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "validation.jsonl"),
                    "split": "val",
                },
            ),
            # Extra test split without rationale annotations.
            datasets.SplitGenerator(
                name="test-extra",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test_extra.jsonl"),
                    "split": "test-extra",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """This function returns the examples in the raw (text) form."""
        # Fall back to the "label" key if the config does not define
        # `label_column`; otherwise generation would fail with AttributeError.
        label_column = getattr(self.config, "label_column", "label")
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                example = {
                    "text": data["text"],
                    "label": data[label_column],
                }
                if split != "test-extra":
                    # Rationale annotations exist only for the main splits.
                    example["foil"] = data["foil"]
                    example["words"] = data["words"]
                    example["rationales"] = data["rationales"]
                    example["contrastive_rationales"] = data["contrastive_rationales"]
                else:
                    example["foil"] = None
                    example["words"] = None
                    example["rationales"] = None
                    example["contrastive_rationales"] = None
                yield id_, example