neel-alex committed
Commit 89aebf9
2 Parent(s): 59de89d fe8663f

Merge branch 'main' of https://huggingface.co/datasets/ought/raft into main

Files changed (1)
  1. raft.py +292 -36
raft.py CHANGED
@@ -16,6 +16,7 @@
 import csv
 import json
 import os
+from pathlib import Path
 
 import datasets
 
@@ -44,10 +45,273 @@ _LICENSE = ""
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 # This gets all folders within the directory named `data`
-DATA_DIRS = next(os.walk('data'))[1]
+DATA_DIR_URL = "data/"  # "https://huggingface.co/datasets/ought/raft/resolve/main/data/"
+# print([p for p in DATA_DIR_PATH.iterdir() if p.is_dir()])
+TASKS = {
+    "ade_corpus_v2": {
+        "name": "ade_corpus_v2",
+        "description": "",
+        "data_columns": [
+            "Sentence",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "ADE-related",
+                "not ADE-related"
+            ]
+        }
+    },
+    "banking_77": {
+        "name": "banking_77",
+        "description": "",
+        "data_columns": [
+            "Query",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "Refund_not_showing_up",
+                "activate_my_card",
+                "age_limit",
+                "apple_pay_or_google_pay",
+                "atm_support",
+                "automatic_top_up",
+                "balance_not_updated_after_bank_transfer",
+                "balance_not_updated_after_cheque_or_cash_deposit",
+                "beneficiary_not_allowed",
+                "cancel_transfer",
+                "card_about_to_expire",
+                "card_acceptance",
+                "card_arrival",
+                "card_delivery_estimate",
+                "card_linking",
+                "card_not_working",
+                "card_payment_fee_charged",
+                "card_payment_not_recognised",
+                "card_payment_wrong_exchange_rate",
+                "card_swallowed",
+                "cash_withdrawal_charge",
+                "cash_withdrawal_not_recognised",
+                "change_pin",
+                "compromised_card",
+                "contactless_not_working",
+                "country_support",
+                "declined_card_payment",
+                "declined_cash_withdrawal",
+                "declined_transfer",
+                "direct_debit_payment_not_recognised",
+                "disposable_card_limits",
+                "edit_personal_details",
+                "exchange_charge",
+                "exchange_rate",
+                "exchange_via_app",
+                "extra_charge_on_statement",
+                "failed_transfer",
+                "fiat_currency_support",
+                "get_disposable_virtual_card",
+                "get_physical_card",
+                "getting_spare_card",
+                "getting_virtual_card",
+                "lost_or_stolen_card",
+                "lost_or_stolen_phone",
+                "order_physical_card",
+                "passcode_forgotten",
+                "pending_card_payment",
+                "pending_cash_withdrawal",
+                "pending_top_up",
+                "pending_transfer",
+                "pin_blocked",
+                "receiving_money",
+                "request_refund",
+                "reverted_card_payment?",
+                "supported_cards_and_currencies",
+                "terminate_account",
+                "top_up_by_bank_transfer_charge",
+                "top_up_by_card_charge",
+                "top_up_by_cash_or_cheque",
+                "top_up_failed",
+                "top_up_limits",
+                "top_up_reverted",
+                "topping_up_by_card",
+                "transaction_charged_twice",
+                "transfer_fee_charged",
+                "transfer_into_account",
+                "transfer_not_received_by_recipient",
+                "transfer_timing",
+                "unable_to_verify_identity",
+                "verify_my_identity",
+                "verify_source_of_funds",
+                "verify_top_up",
+                "virtual_card_not_working",
+                "visa_or_mastercard",
+                "why_verify_identity",
+                "wrong_amount_of_cash_received",
+                "wrong_exchange_rate_for_cash_withdrawal"
+            ]
+        }
+    },
+    "terms_of_service": {
+        "name": "terms_of_service",
+        "description": "",
+        "data_columns": [
+            "Sentence",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "not potentially unfair",
+                "potentially unfair"
+            ]
+        }
+    },
+    "tai_safety_research": {
+        "name": "tai_safety_research",
+        "description": "",
+        "data_columns": [
+            "Title",
+            "Abstract Note",
+            "Url",
+            "Publication Year",
+            "Item Type",
+            "Author",
+            "Publication Title",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "TAI safety research",
+                "not TAI safety research"
+            ]
+        }
+    },
+    "neurips_impact_statement_risks": {
+        "name": "neurips_impact_statement_risks",
+        "description": "",
+        "data_columns": [
+            "Paper title",
+            "Paper link",
+            "Impact statement",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "doesn't mention a harmful application",
+                "mentions a harmful application"
+            ]
+        }
+    },
+    "medical_subdomain_of_clinical_notes": {
+        "name": "medical_subdomain_of_clinical_notes",
+        "description": "",
+        "data_columns": [
+            "Note",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "cardiology",
+                "gastroenterology",
+                "nephrology",
+                "neurology",
+                "psychiatry",
+                "pulmonary disease"
+            ]
+        }
+    },
+    "overruling": {
+        "name": "overruling",
+        "description": "",
+        "data_columns": [
+            "Sentence",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "not overruling",
+                "overruling"
+            ]
+        }
+    },
+    "systematic_review_inclusion": {
+        "name": "systematic_review_inclusion",
+        "description": "",
+        "data_columns": [
+            "Title",
+            "Abstract",
+            "Authors",
+            "Journal",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "included",
+                "not included"
+            ]
+        }
+    },
+    "one_stop_english": {
+        "name": "one_stop_english",
+        "description": "",
+        "data_columns": [
+            "Article",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "advanced",
+                "elementary",
+                "intermediate"
+            ]
+        }
+    },
+    "tweet_eval_hate": {
+        "name": "tweet_eval_hate",
+        "description": "",
+        "data_columns": [
+            "Tweet",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "hate speech",
+                "not hate speech"
+            ]
+        }
+    },
+    "twitter_complaints": {
+        "name": "twitter_complaints",
+        "description": "",
+        "data_columns": [
+            "Tweet text",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "complaint",
+                "no complaint"
+            ]
+        }
+    },
+    "semiconductor_org_types": {
+        "name": "semiconductor_org_types",
+        "description": "",
+        "data_columns": [
+            "Paper title",
+            "Organization name",
+            "ID"
+        ],
+        "label_columns": {
+            "Label": [
+                "company",
+                "research institute",
+                "university"
+            ]
+        }
+    },
+}
 
-_URLs = {s: {'train': f"data/{s}/train.csv",
-             'test': f"data/{s}/test_unlabeled.csv"} for s in DATA_DIRS}
+_URLs = {s: {"train": f"{DATA_DIR_URL}{s}/train.csv", "test": f"{DATA_DIR_URL}{s}/test_unlabeled.csv"} for s in TASKS}
 
 
 class Raft(datasets.GeneratorBasedBuilder):
@@ -66,36 +330,29 @@ class Raft(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
-
-    # TODO: Load task jsons
-
-    tasks = {}
-    for sd in DATA_DIRS:
-        with open(os.path.join('data', sd, 'task.json')) as f:
-            task_data = json.load(f)
-        tasks[sd] = task_data
-
     BUILDER_CONFIGS = []
-    for key in tasks:
-        td = tasks[key]
-        name = td['name']
-        description = td['description']
-        BUILDER_CONFIGS.append(datasets.BuilderConfig(name=name, version=VERSION,
-                                                      description=description))
 
-    DEFAULT_CONFIG_NAME = "tai_safety_research" # It's not mandatory to have a default configuration. Just use one if it make sense.
+    for key in TASKS:
+        td = TASKS[key]
+        name = td["name"]
+        description = td["description"]
+        BUILDER_CONFIGS.append(datasets.BuilderConfig(name=name, version=VERSION, description=description))
+
+    DEFAULT_CONFIG_NAME = (
+        "tai_safety_research"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    )
 
     def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         DEFAULT_LABEL_NAME = "Unlabeled"
 
-        task = Raft.tasks[self.config.name]
-        data_columns = {col_name: datasets.Value("string") for col_name in
-                        task['data_columns']}
+        task = TASKS[self.config.name]
+        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
+        data_columns = {col_name: datasets.Value("string") for col_name in task["data_columns"]}
 
         label_columns = {}
-        for label_name in task['label_columns']:
-            labels = [DEFAULT_LABEL_NAME] + task['label_columns'][label_name]
+        for label_name in task["label_columns"]:
+            labels = [DEFAULT_LABEL_NAME] + task["label_columns"][label_name]
             label_columns[label_name] = datasets.ClassLabel(len(labels), labels)
 
         # Merge dicts
@@ -129,27 +386,26 @@
         data_dir = dl_manager.download_and_extract(_URLs)
         dataset = self.config.name
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN,
-                                    gen_kwargs={"filepath": data_dir[dataset]['train'],
-                                                "split": "train"}),
-            datasets.SplitGenerator(name=datasets.Split.TEST,
-                                    gen_kwargs={"filepath": data_dir[dataset]['test'],
-                                                "split": "test"})
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir[dataset]["train"], "split": "train"}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir[dataset]["test"], "split": "test"}
+            ),
         ]
 
     def _generate_examples(
-        self, filepath, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     ):
-        """ Yields examples as (key, example) tuples. """
+        """Yields examples as (key, example) tuples."""
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
-        task = Raft.tasks[self.config.name]
-        labels = list(task['label_columns'])
+        task = TASKS[self.config.name]
+        labels = list(task["label_columns"])
 
         with open(filepath, encoding="utf-8") as f:
-            csv_reader = csv.reader(f, quotechar='"', delimiter=",",
-                                    quoting=csv.QUOTE_ALL, skipinitialspace=True)
+            csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
             column_names = next(csv_reader)
             # Test csvs don't have any label columns.
             if split == "test":
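
Note on the change: this merge replaces the old import-time task discovery (an os.walk over data/ plus a json.load of each task's task.json, removed in the hunks above) with the hardcoded TASKS literal. If a task's columns or labels change, the literal can be regenerated offline with a sketch like the following, which reuses the removed loader's logic; the collect_tasks name and the data/ default are illustrative, not part of the commit, and a local checkout with the data/<task>/task.json layout is assumed.

import json
import os


def collect_tasks(root="data"):
    """Rebuild the TASKS mapping the way the removed loader did at import time."""
    tasks = {}
    for sd in next(os.walk(root))[1]:  # immediate subdirectories of root
        with open(os.path.join(root, sd, "task.json")) as f:
            tasks[sd] = json.load(f)
    return tasks


if __name__ == "__main__":
    # Print a literal that can be pasted over TASKS in raft.py.
    print("TASKS = " + json.dumps(collect_tasks(), indent=4))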
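
The _info hunk keeps the builder's labeling convention: DEFAULT_LABEL_NAME ("Unlabeled") is prepended to every label list, so unlabeled test rows map to class 0 and gold labels start at 1. A minimal sketch of the resulting feature, using the ade_corpus_v2 labels from TASKS:

import datasets

# "Unlabeled" is prepended exactly as in _info, so class 0 means "no label yet".
labels = ["Unlabeled"] + ["ADE-related", "not ADE-related"]
feature = datasets.ClassLabel(len(labels), labels)
print(feature.str2int("Unlabeled"))    # 0 -- assigned to unlabeled test rows
print(feature.str2int("ADE-related"))  # 1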
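
With TASKS driving BUILDER_CONFIGS and _URLs pointing at the per-task CSVs, each TASKS key becomes a loadable configuration. A usage sketch (the ought/raft repo path comes from the merge URL above; any key of TASKS works as the config name):

import datasets

# Config names are the TASKS keys, e.g. "banking_77";
# "tai_safety_research" is the DEFAULT_CONFIG_NAME set above.
raft = datasets.load_dataset("ought/raft", "tai_safety_research")
print(raft["train"][0])  # labeled training example
print(raft["test"][0])   # test example; its Label is "Unlabeled" (class 0)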