plaguss HF staff committed on
Commit 1ab8239
1 Parent(s): bd0fe45

Include code snippet with the dataset creation

Files changed (1)
  1. README.md +113 -0
README.md CHANGED
@@ -265,6 +265,119 @@ The dataset contains a single split, which is `train`.
 
 ## Dataset Creation
 
+ ### Script used for the generation
+
+ ```python
+ import uuid
+
+ import argilla as rg
+ from datasets import concatenate_datasets, load_dataset
+
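+ # The "raw" config has one row per rater annotation, with a one-hot column per
+ # emotion; the default ("simplified") config carries the aggregated label ids.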
+ ds = load_dataset("go_emotions", "raw", split="train")
+ ds_prepared = load_dataset("go_emotions")
+
+ _CLASS_NAMES = [
+     "admiration",
+     "amusement",
+     "anger",
+     "annoyance",
+     "approval",
+     "caring",
+     "confusion",
+     "curiosity",
+     "desire",
+     "disappointment",
+     "disapproval",
+     "disgust",
+     "embarrassment",
+     "excitement",
+     "fear",
+     "gratitude",
+     "grief",
+     "joy",
+     "love",
+     "nervousness",
+     "optimism",
+     "pride",
+     "realization",
+     "relief",
+     "remorse",
+     "sadness",
+     "surprise",
+     "neutral",
+ ]
+
+ label_to_id = {label: i for i, label in enumerate(_CLASS_NAMES)}
+ id_to_label = {i: label for i, label in enumerate(_CLASS_NAMES)}
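+ # e.g. id_to_label[0] == "admiration" and label_to_id["neutral"] == 27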
311
+
312
+ # Concatenate the datasets and transform to pd.DataFrame
313
+
314
+ ds_prepared = concatenate_datasets([ds_prepared["train"], ds_prepared["validation"], ds_prepared["test"]])
315
+ df_prepared = ds_prepared.to_pandas()
316
+
317
+ # Obtain the final labels as a dict, to later include these as suggestions
318
+
319
+ labels_prepared = {}
320
+ for idx in df_prepared.index:
321
+ labels = [id_to_label[label_id] for label_id in df_prepared['labels'][idx]]
322
+ labels_prepared[df_prepared['id'][idx]] = labels
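+ # labels_prepared maps each comment id to its list of aggregated label names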
323
+
324
+ # Add labels to the dataset and keep only the relevant columns
325
+
326
+ def add_labels(ex):
327
+ labels = []
328
+ for label in _CLASS_NAMES:
329
+ if ex[label] == 1:
330
+ labels.append(label)
331
+ ex["labels"] = labels
332
+
333
+ return ex
334
+
335
+ ds = ds.map(add_labels)
336
+ df = ds.select_columns(["text", "labels", "rater_id", "id"]).to_pandas()
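+ # df now holds one row per (text, rater_id) pair, i.e. each rater's own selection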
337
+
338
+ # Create a FeedbackDataset for text classification
339
+
340
+ feedback_dataset = rg.FeedbackDataset.for_text_classification(labels=_CLASS_NAMES, multi_label=True)
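+ # This template defines a "text" field and a multi-label "label" question, which
+ # is why the records below use fields={"text": ...} and question_name="label".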
341
+
342
+ # Create the records with the original responses, and use as suggestions
343
+ # the final labels in the "simplified" go_emotions dataset.
344
+
345
+ records = []
346
+ for text, df_text in df.groupby("text"):
347
+ responses = []
348
+ for rater_id, df_raters in df_text.groupby("rater_id"):
349
+ responses.append(
350
+ {
351
+ "values": {"label": {"value": df_raters["labels"].iloc[0].tolist()}},
352
+ "status": "submitted",
353
+ "user_id": uuid.UUID(int=rater_id),
354
+ }
355
+ )
356
+ suggested_labels = labels_prepared.get(df_raters["id"].iloc[0], None)
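+     # (df_raters leaks from the inner loop above; its "id" column identifies the comment)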
357
+ if not suggested_labels:
358
+ continue
359
+ suggestion = [
360
+ {
361
+ "question_name": "label",
362
+ "value": suggested_labels,
363
+ "type": "human",
364
+ }
365
+ ]
366
+ records.append(
367
+ rg.FeedbackRecord(
368
+ fields={"text": df_raters["text"].iloc[0]},
369
+ responses=responses,
370
+ suggestions=suggestion
371
+ )
372
+ )
373
+
374
+
375
+ feedback_dataset.add_records(records)
376
+
377
+ # Push to the hub
378
+ feedback_dataset.push_to_huggingface("plaguss/go_emotions_raw")
379
+ ```
380
+
 ### Curation Rationale
 
 [More Information Needed]
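
For reference, a minimal sketch of reading the pushed dataset back (assuming the same Argilla 1.x client used above, where `FeedbackDataset.from_huggingface` is the Hub loader):

```python
import argilla as rg

# Pull the dataset published by the script above from the Hugging Face Hub.
dataset = rg.FeedbackDataset.from_huggingface("plaguss/go_emotions_raw")

# Each record carries the comment text, one response per rater, and the
# aggregated labels as a suggestion.
record = dataset.records[0]
print(record.fields["text"])
print([response.values["label"].value for response in record.responses])
```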