nlp-thedeep committed on
Commit
e8fc790
1 Parent(s): ac1c84e

Update humset.py

Browse files
Files changed (1) hide show
  1. humset.py +67 -2
humset.py CHANGED
@@ -37,6 +37,11 @@ _DESCRIPTION = """\
37
  HumSet is a novel and rich multilingual dataset of humanitarian response documents annotated by experts in the humanitarian response community. HumSet is curated by humanitarian analysts and covers various disasters around the globe that occurred from 2018 to 2021 in 46 humanitarian response projects. The dataset consists of approximately 17K annotated documents in three languages of English, French, and Spanish, originally taken from publicly-available resources. For each document, analysts have identified informative snippets (entries) in respect to common humanitarian frameworks, and assigned one or many classes to each entry. See the our paper for details.
38
  """
39
 
 
 
 
 
 
40
  _HOMEPAGE = "https://huggingface.co/datasets/nlp-thedeep/humset"
41
 
42
  _LICENSE = "The GitHub repository which houses this dataset has an Apache License 2.0."
@@ -53,6 +58,12 @@ _URLs = {
53
  "dev": "data/validation_1_1.jsonl",
54
  "test": "data/test_1_1.jsonl"
55
  }
 
 
 
 
 
 
56
  }
57
 
58
 
@@ -61,6 +72,7 @@ _SUPPORTED_VERSIONS = [
61
  datasets.Version("1.0.0", "Only primary tags"),
62
  # Second version
63
  datasets.Version("2.0.0", "Extented data points including secondary tags and geolocations"),
 
64
  ]
65
 
66
 
@@ -118,6 +130,26 @@ SECOND_FEATURES = datasets.Features(
118
  }
119
  )
120
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  class HumsetConfig(datasets.BuilderConfig):
122
  """BuilderConfig for DuoRC SelfRC."""
123
 
@@ -165,13 +197,24 @@ class Humset(datasets.GeneratorBasedBuilder):
165
  citation=_CITATION,
166
  )
167
 
 
 
 
 
 
 
 
 
 
 
 
168
  def _split_generators(self, dl_manager):
169
 
170
  """Returns SplitGenerators."""
171
 
172
  my_urls = _URLs[self.config.name]
173
  downloaded_files = dl_manager.download_and_extract(my_urls)
174
- return [
175
  datasets.SplitGenerator(
176
  name=datasets.Split.TRAIN,
177
  gen_kwargs={
@@ -183,14 +226,36 @@ class Humset(datasets.GeneratorBasedBuilder):
183
  gen_kwargs={
184
  "filepath": downloaded_files["dev"],
185
  },
186
- ),
 
 
 
187
  datasets.SplitGenerator(
188
  name=datasets.Split.TEST,
189
  gen_kwargs={
190
  "filepath": downloaded_files["test"],
191
  },
 
 
 
 
 
 
 
 
 
 
 
 
192
  ),
 
 
 
 
 
 
193
  ]
 
194
 
195
  def _generate_examples(self, filepath):
196
 
 
37
  HumSet is a novel and rich multilingual dataset of humanitarian response documents annotated by experts in the humanitarian response community. HumSet is curated by humanitarian analysts and covers various disasters around the globe that occurred from 2018 to 2021 in 46 humanitarian response projects. The dataset consists of approximately 17K annotated documents in three languages of English, French, and Spanish, originally taken from publicly-available resources. For each document, analysts have identified informative snippets (entries) in respect to common humanitarian frameworks, and assigned one or many classes to each entry. See the our paper for details.
38
  """
39
 
40
+ _DESCRIPTION_BIAS = """\
41
+ HUMSETBIAS is a subset of the English part of the HUMSET dataset, created by searching for specific sensitive English keywords related to genders and countries within the annotated text. In addition, we extended this
42
+ subset by incorporating targeted counterfactual samples, generated by modifying the original entries in order to create the altered versions of each text with gender/country information. The purpose of HUMSETBIAS is to provide a more targeted resource for analyzing and addressing potential biases in humanitarian data and to enable the development of accurate and bias-aware NLP applications in the humanitarian sector.
43
+ """
44
+
45
  _HOMEPAGE = "https://huggingface.co/datasets/nlp-thedeep/humset"
46
 
47
  _LICENSE = "The GitHub repository which houses this dataset has an Apache License 2.0."
 
58
  "dev": "data/validation_1_1.jsonl",
59
  "test": "data/test_1_1.jsonl"
60
  }
61
+ "humsetbias": {
62
+ "train": "data/humset_bias_train.jsonl",
63
+ "dev": "data/humset_bias_val.jsonl",
64
+ "gender": "data/test_gender.jsonl",
65
+ "country": "data/test_country.jsonl"
66
+ }
67
  }
68
 
69
 
 
72
  datasets.Version("1.0.0", "Only primary tags"),
73
  # Second version
74
  datasets.Version("2.0.0", "Extented data points including secondary tags and geolocations"),
75
+ datasets.Version("humsetbias", "Gender and Country bias extension of HumSet")
76
  ]
77
 
78
 
 
130
  }
131
  )
132
 
133
+ HUMSETBIAS_FEATURES = datasets.Features(
134
+ {
135
+ "entry_id": datasets.Value("string"),
136
+ "excerpt": datasets.Value("string"),
137
+ "lang": datasets.Value("string"),
138
+ "keywords": datasets.Sequence(datasets.Value("string"), length=-1),
139
+ "gender_keywords": datasets.Sequence(datasets.Value("string"), length=-1),
140
+ "country_keywords": datasets.Sequence(datasets.Value("string"), length=-1),
141
+ "gender_kword_type": datasets.Sequence(datasets.Value("string"), length=-1),
142
+ "country_kword_type": datasets.Sequence(datasets.Value("string"), length=-1),
143
+ "gender_context_falsing_kw": datasets.Sequence(datasets.Value("string"), length=-1),
144
+ "country_context_falsing_kw": datasets.Sequence(datasets.Value("string"), length=-1),
145
+ "excerpt_type": datasets.Value("string"),
146
+ "sectors": datasets.Sequence(datasets.Value("string"), length=-1),
147
+ "pillars_1d": datasets.Sequence(datasets.Value("string"), length=-1),
148
+ "pillars_2d": datasets.Sequence(datasets.Value("string"), length=-1),
149
+ "subpillars_1d": datasets.Sequence(datasets.Value("string"), length=-1),
150
+ "subpillars_2d": datasets.Sequence(datasets.Value("string"), length=-1),
151
+ }
152
+ )
153
  class HumsetConfig(datasets.BuilderConfig):
154
  """BuilderConfig for DuoRC SelfRC."""
155
 
 
197
  citation=_CITATION,
198
  )
199
 
200
+ elif self.config.name == "humsetbias":
201
+ return datasets.DatasetInfo(
202
+ # This is the description that will appear on the datasets page.
203
+ description=_DESCRIPTION_BIAS,
204
+ # This defines the different columns of the dataset and their types
205
+ features=HUMSETBIAS_FEATURES,
206
+ homepage=_HOMEPAGE,
207
+ license=_LICENSE,
208
+ citation=_CITATION,
209
+ )
210
+
211
  def _split_generators(self, dl_manager):
212
 
213
  """Returns SplitGenerators."""
214
 
215
  my_urls = _URLs[self.config.name]
216
  downloaded_files = dl_manager.download_and_extract(my_urls)
217
+ splits = [
218
  datasets.SplitGenerator(
219
  name=datasets.Split.TRAIN,
220
  gen_kwargs={
 
226
  gen_kwargs={
227
  "filepath": downloaded_files["dev"],
228
  },
229
+ )]
230
+
231
+ if self.config.name in ["1.0.0", "2.00"]:
232
+ splits = splits + [
233
  datasets.SplitGenerator(
234
  name=datasets.Split.TEST,
235
  gen_kwargs={
236
  "filepath": downloaded_files["test"],
237
  },
238
+ )
239
+ ]
240
+
241
+ elif self.config.name == "humsetbias":
242
+
243
+ splits = splits + [
244
+
245
+ datasets.SplitGenerator(
246
+ name=datasets.Split.TEST,
247
+ gen_kwargs={
248
+ "filepath": downloaded_files["gender"],
249
+ },
250
  ),
251
+ datasets.SplitGenerator(
252
+ name=datasets.Split.TEST,
253
+ gen_kwargs={
254
+ "filepath": downloaded_files["country"],
255
+ },
256
+ )
257
  ]
258
+ return splits
259
 
260
  def _generate_examples(self, filepath):
261