nlp-thedeep committed
Commit f9aab8c
Parent: 7279a4c

Update humset.py

Files changed (1):
  1. humset.py +0 -58
humset.py CHANGED
@@ -37,10 +37,6 @@ _DESCRIPTION = """\
 HumSet is a novel and rich multilingual dataset of humanitarian response documents annotated by experts in the humanitarian response community. HumSet is curated by humanitarian analysts and covers various disasters around the globe that occurred from 2018 to 2021 in 46 humanitarian response projects. The dataset consists of approximately 17K annotated documents in three languages (English, French, and Spanish), originally taken from publicly available resources. For each document, analysts have identified informative snippets (entries) with respect to common humanitarian frameworks, and assigned one or more classes to each entry. See our paper for details.
 """

-_DESCRIPTION_BIAS = """\
-HUMSETBIAS is a subset of the English part of the HUMSET dataset, created by searching for specific sensitive English keywords related to gender and country within the annotated text. In addition, we extended this
-subset by incorporating targeted counterfactual samples, generated by modifying the original entries to create altered versions of each text with gender/country information. The purpose of HUMSETBIAS is to provide a more targeted resource for analyzing and addressing potential biases in humanitarian data and to enable the development of accurate and bias-aware NLP applications in the humanitarian sector.
-"""

 _HOMEPAGE = "https://huggingface.co/datasets/nlp-thedeep/humset"

@@ -57,12 +53,6 @@ _URLs = {
         "train": "data/train_1_1.jsonl",
         "dev": "data/validation_1_1.jsonl",
         "test": "data/test_1_1.jsonl"
-    },
-    "humsetbias": {
-        "train": "data/humset_bias_train.jsonl",
-        "dev": "data/humset_bias_val.jsonl",
-        "gender": "data/test_gender.jsonl",
-        "country": "data/test_country.jsonl"
     }
 }

@@ -72,7 +62,6 @@ _SUPPORTED_VERSIONS = [
     datasets.Version("1.0.0", "Only primary tags"),
     # Second version
     datasets.Version("2.0.0", "Extended data points including secondary tags and geolocations"),
-    datasets.Version("humsetbias", "Gender and Country bias extension of HumSet")
 ]


@@ -130,26 +119,6 @@ SECOND_FEATURES = datasets.Features(
     }
 )

-HUMSETBIAS_FEATURES = datasets.Features(
-    {
-        "entry_id": datasets.Value("string"),
-        "excerpt": datasets.Value("string"),
-        "lang": datasets.Value("string"),
-        "keywords": datasets.Sequence(datasets.Value("string"), length=-1),
-        "gender_keywords": datasets.Sequence(datasets.Value("string"), length=-1),
-        "country_keywords": datasets.Sequence(datasets.Value("string"), length=-1),
-        "gender_kword_type": datasets.Sequence(datasets.Value("string"), length=-1),
-        "country_kword_type": datasets.Sequence(datasets.Value("string"), length=-1),
-        "gender_context_falsing_kw": datasets.Sequence(datasets.Value("string"), length=-1),
-        "country_context_falsing_kw": datasets.Sequence(datasets.Value("string"), length=-1),
-        "excerpt_type": datasets.Value("string"),
-        "sectors": datasets.Sequence(datasets.Value("string"), length=-1),
-        "pillars_1d": datasets.Sequence(datasets.Value("string"), length=-1),
-        "pillars_2d": datasets.Sequence(datasets.Value("string"), length=-1),
-        "subpillars_1d": datasets.Sequence(datasets.Value("string"), length=-1),
-        "subpillars_2d": datasets.Sequence(datasets.Value("string"), length=-1),
-    }
-)
 class HumsetConfig(datasets.BuilderConfig):
     """BuilderConfig for HumSet."""

@@ -197,16 +166,6 @@ class Humset(datasets.GeneratorBasedBuilder):
                 citation=_CITATION,
             )

-        elif self.config.name == "humsetbias":
-            return datasets.DatasetInfo(
-                # This is the description that will appear on the datasets page.
-                description=_DESCRIPTION_BIAS,
-                # This defines the different columns of the dataset and their types
-                features=HUMSETBIAS_FEATURES,
-                homepage=_HOMEPAGE,
-                license=_LICENSE,
-                citation=_CITATION,
-            )

     def _split_generators(self, dl_manager):

@@ -238,23 +197,6 @@ class Humset(datasets.GeneratorBasedBuilder):
             )
         ]

-        elif self.config.name == "humsetbias":
-
-            splits = splits + [
-
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "filepath": downloaded_files["gender"],
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "filepath": downloaded_files["country"],
-                    },
-                )
-            ]
         return splits

     def _generate_examples(self, filepath):
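After this commit, only the two version-style configurations remain. A minimal loading sketch, assuming the `datasets` library and that the builder exposes the `_SUPPORTED_VERSIONS` names above as config names (the split name and the "excerpt" column used below are assumptions based on the definitions shown in the diff, not confirmed by this commit):

from datasets import load_dataset

# Load one of the remaining HumSet configurations ("1.0.0" or "2.0.0");
# requesting "humsetbias" should now fail, since its config, features,
# and data files were removed in this commit.
humset = load_dataset("nlp-thedeep/humset", "2.0.0")

# The splits come from the _URLs mapping above; "excerpt" is assumed
# here to be the annotated text column.
print(humset["train"][0]["excerpt"])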
 
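One detail worth flagging in the removed _split_generators branch: both bias test files were registered under the same split name, datasets.Split.TEST, which the datasets builder would reject as a duplicate split. If the humsetbias config were ever reinstated, one way to expose both files is distinct named splits. A sketch under that assumption (the helper name and split labels are hypothetical, not from the original script):

import datasets

def bias_test_split_generators(downloaded_files):
    # Hypothetical helper: give each bias test file its own named split
    # instead of registering datasets.Split.TEST twice.
    return [
        datasets.SplitGenerator(
            name=datasets.NamedSplit("test_gender"),
            gen_kwargs={"filepath": downloaded_files["gender"]},
        ),
        datasets.SplitGenerator(
            name=datasets.NamedSplit("test_country"),
            gen_kwargs={"filepath": downloaded_files["country"]},
        ),
    ]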