albertvillanova HF staff committed on
Commit b3a375b • 1 Parent(s): 675a13f

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (bf446c28e394d34942439ece305066d8e000cc4b)
- Add emotion data files (2d948f1722cd37b0958acedbbb4c50df5d3783d0)
- Add hate data files (02a5ceb555b982fa24beb13722977f1b64013039)
- Add irony data files (d0b81754adc6f44a439a2866dada5a1f0f1b55f6)
- Add offensive data files (9b98abef0b187e34801efc69f20641e9b60a0961)
- Add sentiment data files (6fe62d6334fb7f8197508d057f6351333dbe7646)
- Add stance_abortion data files (55864a3201f347e9edfac045e7f772eb2c0ee801)
- Add stance_atheism data files (449c8d551b86ba45c3c37b511f1f8d7ea91ec3ec)
- Add stance_climate data files (ff01bee78da3a8ec1c5ab3bf7d9d4c8f5121f24f)
- Add stance_feminist data files (5ea3f6097320422a56ac6677a992bb34fd58420f)
- Add stance_hillary data files (c460af45e62a2692f21d9755cfe8b3b9de801b4a)
- Delete loading script (68be50101769462a8a0b5fa46572a84b7d78d72e)
- Delete legacy dataset_infos.json (684f8540f99a7aad647599b97a3219ddc2801c0a)
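
For context, a minimal sketch of loading the converted data (assuming the repository id `tweet_eval` and the `emotion` config touched by this commit); with the Parquet files and README metadata in place, the data files are read directly rather than through the deleted loading script:

```python
from datasets import load_dataset

# Loads the Parquet-backed config directly; no dataset script is executed.
ds = load_dataset("tweet_eval", "emotion")

print(ds)              # DatasetDict with train / test / validation splits
print(ds["train"][0])  # e.g. {'text': '...', 'label': 2}
```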

Files changed (36)
  1. README.md +156 -67
  2. dataset_infos.json +0 -1
  3. emoji/test-00000-of-00001.parquet +3 -0
  4. emoji/train-00000-of-00001.parquet +3 -0
  5. emoji/validation-00000-of-00001.parquet +3 -0
  6. emotion/test-00000-of-00001.parquet +3 -0
  7. emotion/train-00000-of-00001.parquet +3 -0
  8. emotion/validation-00000-of-00001.parquet +3 -0
  9. hate/test-00000-of-00001.parquet +3 -0
  10. hate/train-00000-of-00001.parquet +3 -0
  11. hate/validation-00000-of-00001.parquet +3 -0
  12. irony/test-00000-of-00001.parquet +3 -0
  13. irony/train-00000-of-00001.parquet +3 -0
  14. irony/validation-00000-of-00001.parquet +3 -0
  15. offensive/test-00000-of-00001.parquet +3 -0
  16. offensive/train-00000-of-00001.parquet +3 -0
  17. offensive/validation-00000-of-00001.parquet +3 -0
  18. sentiment/test-00000-of-00001.parquet +3 -0
  19. sentiment/train-00000-of-00001.parquet +3 -0
  20. sentiment/validation-00000-of-00001.parquet +3 -0
  21. stance_abortion/test-00000-of-00001.parquet +3 -0
  22. stance_abortion/train-00000-of-00001.parquet +3 -0
  23. stance_abortion/validation-00000-of-00001.parquet +3 -0
  24. stance_atheism/test-00000-of-00001.parquet +3 -0
  25. stance_atheism/train-00000-of-00001.parquet +3 -0
  26. stance_atheism/validation-00000-of-00001.parquet +3 -0
  27. stance_climate/test-00000-of-00001.parquet +3 -0
  28. stance_climate/train-00000-of-00001.parquet +3 -0
  29. stance_climate/validation-00000-of-00001.parquet +3 -0
  30. stance_feminist/test-00000-of-00001.parquet +3 -0
  31. stance_feminist/train-00000-of-00001.parquet +3 -0
  32. stance_feminist/validation-00000-of-00001.parquet +3 -0
  33. stance_hillary/test-00000-of-00001.parquet +3 -0
  34. stance_hillary/train-00000-of-00001.parquet +3 -0
  35. stance_hillary/validation-00000-of-00001.parquet +3 -0
  36. tweet_eval.py +0 -249
README.md CHANGED
@@ -24,6 +24,18 @@ task_ids:
24
  - sentiment-classification
25
  paperswithcode_id: tweeteval
26
  pretty_name: TweetEval
27
  dataset_info:
28
  - config_name: emoji
29
  features:
@@ -55,16 +67,16 @@ dataset_info:
55
  '19': 😜
56
  splits:
57
  - name: train
58
- num_bytes: 3803187
59
  num_examples: 45000
60
  - name: test
61
- num_bytes: 4255921
62
  num_examples: 50000
63
  - name: validation
64
- num_bytes: 396083
65
  num_examples: 5000
66
- download_size: 7628721
67
- dataset_size: 8455191
68
  - config_name: emotion
69
  features:
70
  - name: text
@@ -79,16 +91,16 @@ dataset_info:
79
  '3': sadness
80
  splits:
81
  - name: train
82
- num_bytes: 338875
83
  num_examples: 3257
84
  - name: test
85
- num_bytes: 146649
86
  num_examples: 1421
87
  - name: validation
88
- num_bytes: 38277
89
  num_examples: 374
90
- download_size: 483813
91
- dataset_size: 523801
92
  - config_name: hate
93
  features:
94
  - name: text
@@ -101,16 +113,16 @@ dataset_info:
101
  '1': hate
102
  splits:
103
  - name: train
104
- num_bytes: 1223654
105
  num_examples: 9000
106
  - name: test
107
- num_bytes: 428938
108
  num_examples: 2970
109
  - name: validation
110
- num_bytes: 154148
111
  num_examples: 1000
112
- download_size: 1703208
113
- dataset_size: 1806740
114
  - config_name: irony
115
  features:
116
  - name: text
@@ -123,16 +135,16 @@ dataset_info:
123
  '1': irony
124
  splits:
125
  - name: train
126
- num_bytes: 259191
127
  num_examples: 2862
128
  - name: test
129
- num_bytes: 75901
130
  num_examples: 784
131
  - name: validation
132
- num_bytes: 86021
133
  num_examples: 955
134
- download_size: 385613
135
- dataset_size: 421113
136
  - config_name: offensive
137
  features:
138
  - name: text
@@ -145,16 +157,16 @@ dataset_info:
145
  '1': offensive
146
  splits:
147
  - name: train
148
- num_bytes: 1648069
149
  num_examples: 11916
150
  - name: test
151
- num_bytes: 135477
152
  num_examples: 860
153
  - name: validation
154
- num_bytes: 192421
155
  num_examples: 1324
156
- download_size: 1863383
157
- dataset_size: 1975967
158
  - config_name: sentiment
159
  features:
160
  - name: text
@@ -168,16 +180,16 @@ dataset_info:
168
  '2': positive
169
  splits:
170
  - name: train
171
- num_bytes: 5425142
172
  num_examples: 45615
173
  - name: test
174
- num_bytes: 1279548
175
  num_examples: 12284
176
  - name: validation
177
- num_bytes: 239088
178
  num_examples: 2000
179
- download_size: 6465841
180
- dataset_size: 6943778
181
  - config_name: stance_abortion
182
  features:
183
  - name: text
@@ -191,16 +203,16 @@ dataset_info:
191
  '2': favor
192
  splits:
193
  - name: train
194
- num_bytes: 68698
195
  num_examples: 587
196
  - name: test
197
- num_bytes: 33175
198
  num_examples: 280
199
  - name: validation
200
- num_bytes: 7661
201
  num_examples: 66
202
- download_size: 102062
203
- dataset_size: 109534
204
  - config_name: stance_atheism
205
  features:
206
  - name: text
@@ -214,16 +226,16 @@ dataset_info:
214
  '2': favor
215
  splits:
216
  - name: train
217
- num_bytes: 54779
218
  num_examples: 461
219
  - name: test
220
- num_bytes: 25720
221
  num_examples: 220
222
  - name: validation
223
- num_bytes: 6324
224
  num_examples: 52
225
- download_size: 80947
226
- dataset_size: 86823
227
  - config_name: stance_climate
228
  features:
229
  - name: text
@@ -237,16 +249,16 @@ dataset_info:
237
  '2': favor
238
  splits:
239
  - name: train
240
- num_bytes: 40253
241
  num_examples: 355
242
  - name: test
243
- num_bytes: 19929
244
  num_examples: 169
245
  - name: validation
246
- num_bytes: 4805
247
  num_examples: 40
248
- download_size: 60463
249
- dataset_size: 64987
250
  - config_name: stance_feminist
251
  features:
252
  - name: text
@@ -260,16 +272,16 @@ dataset_info:
260
  '2': favor
261
  splits:
262
  - name: train
263
- num_bytes: 70513
264
  num_examples: 597
265
  - name: test
266
- num_bytes: 33309
267
  num_examples: 285
268
  - name: validation
269
- num_bytes: 8039
270
  num_examples: 67
271
- download_size: 104257
272
- dataset_size: 111861
273
  - config_name: stance_hillary
274
  features:
275
  - name: text
@@ -283,16 +295,105 @@ dataset_info:
283
  '2': favor
284
  splits:
285
  - name: train
286
- num_bytes: 69600
287
  num_examples: 620
288
  - name: test
289
- num_bytes: 34491
290
  num_examples: 295
291
  - name: validation
292
- num_bytes: 7536
293
  num_examples: 69
294
- download_size: 103745
295
- dataset_size: 111627
296
  train-eval-index:
297
  - config: emotion
298
  task: text-classification
@@ -510,18 +611,6 @@ train-eval-index:
510
  name: Recall weighted
511
  args:
512
  average: weighted
513
- config_names:
514
- - emoji
515
- - emotion
516
- - hate
517
- - irony
518
- - offensive
519
- - sentiment
520
- - stance_abortion
521
- - stance_atheism
522
- - stance_climate
523
- - stance_feminist
524
- - stance_hillary
525
  ---
526
 
527
  # Dataset Card for tweet_eval
24
  - sentiment-classification
25
  paperswithcode_id: tweeteval
26
  pretty_name: TweetEval
27
+ config_names:
28
+ - emoji
29
+ - emotion
30
+ - hate
31
+ - irony
32
+ - offensive
33
+ - sentiment
34
+ - stance_abortion
35
+ - stance_atheism
36
+ - stance_climate
37
+ - stance_feminist
38
+ - stance_hillary
39
  dataset_info:
40
  - config_name: emoji
41
  features:
67
  '19': 😜
68
  splits:
69
  - name: train
70
+ num_bytes: 3803167
71
  num_examples: 45000
72
  - name: test
73
+ num_bytes: 4255901
74
  num_examples: 50000
75
  - name: validation
76
+ num_bytes: 396079
77
  num_examples: 5000
78
+ download_size: 5939308
79
+ dataset_size: 8455147
80
  - config_name: emotion
81
  features:
82
  - name: text
91
  '3': sadness
92
  splits:
93
  - name: train
94
+ num_bytes: 338871
95
  num_examples: 3257
96
  - name: test
97
+ num_bytes: 146645
98
  num_examples: 1421
99
  - name: validation
100
+ num_bytes: 38273
101
  num_examples: 374
102
+ download_size: 367016
103
+ dataset_size: 523789
104
  - config_name: hate
105
  features:
106
  - name: text
113
  '1': hate
114
  splits:
115
  - name: train
116
+ num_bytes: 1223650
117
  num_examples: 9000
118
  - name: test
119
+ num_bytes: 428934
120
  num_examples: 2970
121
  - name: validation
122
+ num_bytes: 154144
123
  num_examples: 1000
124
+ download_size: 1196346
125
+ dataset_size: 1806728
126
  - config_name: irony
127
  features:
128
  - name: text
135
  '1': irony
136
  splits:
137
  - name: train
138
+ num_bytes: 259187
139
  num_examples: 2862
140
  - name: test
141
+ num_bytes: 75897
142
  num_examples: 784
143
  - name: validation
144
+ num_bytes: 86017
145
  num_examples: 955
146
+ download_size: 297647
147
+ dataset_size: 421101
148
  - config_name: offensive
149
  features:
150
  - name: text
157
  '1': offensive
158
  splits:
159
  - name: train
160
+ num_bytes: 1648061
161
  num_examples: 11916
162
  - name: test
163
+ num_bytes: 135473
164
  num_examples: 860
165
  - name: validation
166
+ num_bytes: 192417
167
  num_examples: 1324
168
+ download_size: 1234528
169
+ dataset_size: 1975951
170
  - config_name: sentiment
171
  features:
172
  - name: text
180
  '2': positive
181
  splits:
182
  - name: train
183
+ num_bytes: 5425122
184
  num_examples: 45615
185
  - name: test
186
+ num_bytes: 1279540
187
  num_examples: 12284
188
  - name: validation
189
+ num_bytes: 239084
190
  num_examples: 2000
191
+ download_size: 4849675
192
+ dataset_size: 6943746
193
  - config_name: stance_abortion
194
  features:
195
  - name: text
203
  '2': favor
204
  splits:
205
  - name: train
206
+ num_bytes: 68694
207
  num_examples: 587
208
  - name: test
209
+ num_bytes: 33171
210
  num_examples: 280
211
  - name: validation
212
+ num_bytes: 7657
213
  num_examples: 66
214
+ download_size: 73517
215
+ dataset_size: 109522
216
  - config_name: stance_atheism
217
  features:
218
  - name: text
226
  '2': favor
227
  splits:
228
  - name: train
229
+ num_bytes: 54775
230
  num_examples: 461
231
  - name: test
232
+ num_bytes: 25716
233
  num_examples: 220
234
  - name: validation
235
+ num_bytes: 6320
236
  num_examples: 52
237
+ download_size: 62265
238
+ dataset_size: 86811
239
  - config_name: stance_climate
240
  features:
241
  - name: text
249
  '2': favor
250
  splits:
251
  - name: train
252
+ num_bytes: 40249
253
  num_examples: 355
254
  - name: test
255
+ num_bytes: 19925
256
  num_examples: 169
257
  - name: validation
258
+ num_bytes: 4801
259
  num_examples: 40
260
+ download_size: 48493
261
+ dataset_size: 64975
262
  - config_name: stance_feminist
263
  features:
264
  - name: text
272
  '2': favor
273
  splits:
274
  - name: train
275
+ num_bytes: 70509
276
  num_examples: 597
277
  - name: test
278
+ num_bytes: 33305
279
  num_examples: 285
280
  - name: validation
281
+ num_bytes: 8035
282
  num_examples: 67
283
+ download_size: 76345
284
+ dataset_size: 111849
285
  - config_name: stance_hillary
286
  features:
287
  - name: text
295
  '2': favor
296
  splits:
297
  - name: train
298
+ num_bytes: 69596
299
  num_examples: 620
300
  - name: test
301
+ num_bytes: 34487
302
  num_examples: 295
303
  - name: validation
304
+ num_bytes: 7532
305
  num_examples: 69
306
+ download_size: 74057
307
+ dataset_size: 111615
308
+ configs:
309
+ - config_name: emoji
310
+ data_files:
311
+ - split: train
312
+ path: emoji/train-*
313
+ - split: test
314
+ path: emoji/test-*
315
+ - split: validation
316
+ path: emoji/validation-*
317
+ - config_name: emotion
318
+ data_files:
319
+ - split: train
320
+ path: emotion/train-*
321
+ - split: test
322
+ path: emotion/test-*
323
+ - split: validation
324
+ path: emotion/validation-*
325
+ - config_name: hate
326
+ data_files:
327
+ - split: train
328
+ path: hate/train-*
329
+ - split: test
330
+ path: hate/test-*
331
+ - split: validation
332
+ path: hate/validation-*
333
+ - config_name: irony
334
+ data_files:
335
+ - split: train
336
+ path: irony/train-*
337
+ - split: test
338
+ path: irony/test-*
339
+ - split: validation
340
+ path: irony/validation-*
341
+ - config_name: offensive
342
+ data_files:
343
+ - split: train
344
+ path: offensive/train-*
345
+ - split: test
346
+ path: offensive/test-*
347
+ - split: validation
348
+ path: offensive/validation-*
349
+ - config_name: sentiment
350
+ data_files:
351
+ - split: train
352
+ path: sentiment/train-*
353
+ - split: test
354
+ path: sentiment/test-*
355
+ - split: validation
356
+ path: sentiment/validation-*
357
+ - config_name: stance_abortion
358
+ data_files:
359
+ - split: train
360
+ path: stance_abortion/train-*
361
+ - split: test
362
+ path: stance_abortion/test-*
363
+ - split: validation
364
+ path: stance_abortion/validation-*
365
+ - config_name: stance_atheism
366
+ data_files:
367
+ - split: train
368
+ path: stance_atheism/train-*
369
+ - split: test
370
+ path: stance_atheism/test-*
371
+ - split: validation
372
+ path: stance_atheism/validation-*
373
+ - config_name: stance_climate
374
+ data_files:
375
+ - split: train
376
+ path: stance_climate/train-*
377
+ - split: test
378
+ path: stance_climate/test-*
379
+ - split: validation
380
+ path: stance_climate/validation-*
381
+ - config_name: stance_feminist
382
+ data_files:
383
+ - split: train
384
+ path: stance_feminist/train-*
385
+ - split: test
386
+ path: stance_feminist/test-*
387
+ - split: validation
388
+ path: stance_feminist/validation-*
389
+ - config_name: stance_hillary
390
+ data_files:
391
+ - split: train
392
+ path: stance_hillary/train-*
393
+ - split: test
394
+ path: stance_hillary/test-*
395
+ - split: validation
396
+ path: stance_hillary/validation-*
397
  train-eval-index:
398
  - config: emotion
399
  task: text-classification
611
  name: Recall weighted
612
  args:
613
  average: weighted
614
  ---
615
 
616
  # Dataset Card for tweet_eval
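
The `num_examples` values declared in the front matter above can be spot-checked against the loaded splits. A sketch (counts taken from the `stance_hillary` entry above; repository id assumed to be `tweet_eval`):

```python
from datasets import load_dataset

# Expected split sizes, copied from the dataset_info metadata above.
expected = {"train": 620, "test": 295, "validation": 69}

ds = load_dataset("tweet_eval", "stance_hillary")
for split, n in expected.items():
    # num_rows should match the num_examples declared in the README front matter
    assert ds[split].num_rows == n, (split, ds[split].num_rows)
```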
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"emoji": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 20, "names": ["\u2764", "\ud83d\ude0d", "\ud83d\ude02", "\ud83d\udc95", "\ud83d\udd25", "\ud83d\ude0a", "\ud83d\ude0e", "\u2728", "\ud83d\udc99", "\ud83d\ude18", "\ud83d\udcf7", "\ud83c\uddfa\ud83c\uddf8", "\u2600", "\ud83d\udc9c", "\ud83d\ude09", "\ud83d\udcaf", "\ud83d\ude01", "\ud83c\udf84", "\ud83d\udcf8", "\ud83d\ude1c"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "emoji", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3803187, "num_examples": 45000, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 4255921, "num_examples": 50000, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 396083, "num_examples": 5000, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/train_text.txt": {"num_bytes": 3353167, "checksum": "eacb6b0ee1fe2803d72a009c2e731fe07659f604318a979951d2f07c23c564a1"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/train_labels.txt": {"num_bytes": 102760, "checksum": "daee7da826683dbfa50ad3a29c60bc527e498f06c70eabee3745a99cc37ab3a5"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/test_text.txt": {"num_bytes": 3705901, "checksum": "e4de11de1597842c431dd67868e83322f5a432564dfd8558889ed8ac6a1a5e09"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/test_labels.txt": {"num_bytes": 114435, "checksum": "c1662b84788f36674ab8f0106f3e2e7d3e258ddf4959086ac7cc75b1e68dd1f6"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/val_text.txt": {"num_bytes": 341079, "checksum": "3bc3742d6af404cea792671878684d110f3bc02fd79a2e34643789a521d81a26"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/val_labels.txt": {"num_bytes": 11379, "checksum": "21ba456f688668d049ff0fb1fa04469ee684cf4e2467c71d2c3fe5ca2ba1bd1a"}}, "download_size": 7628721, "post_processing_size": null, "dataset_size": 8455191, "size_in_bytes": 16083912}, "emotion": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["anger", "joy", "optimism", "sadness"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "emotion", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 338875, "num_examples": 3257, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 146649, "num_examples": 1421, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 38277, "num_examples": 374, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/train_text.txt": {"num_bytes": 306630, "checksum": "2c62f67aeb3eac1aea0e5a9c3d0f4bc337992581f3f858061786a1fb4d79d95e"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/train_labels.txt": {"num_bytes": 6514, "checksum": "987e767d8679e18abdf7de37a6d2bcd0a40a296ddd704e8d515cf0e3033c8d9c"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/test_text.txt": {"num_bytes": 132523, "checksum": "7e1070f5d3e3fcece5bc73680bff9981e90d8f7b2f1009bfe7a01d059d1c6091"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/test_labels.txt": {"num_bytes": 2842, "checksum": "245072348c711961785be6d395997f97cf7fcda3effeae7805664171dc75f913"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/val_text.txt": {"num_bytes": 34556, "checksum": "e2e30c86b8cbb97944d6543aedc06eace3bb275cb2f381aba787b838b4f23ca5"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emotion/val_labels.txt": {"num_bytes": 748, "checksum": "313730630160b7e0a6b4235b800c76683f4aeeb72d094eb69646630cd5cfe338"}}, "download_size": 483813, "post_processing_size": null, "dataset_size": 523801, "size_in_bytes": 1007614}, "hate": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["non-hate", "hate"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "hate", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1223654, "num_examples": 9000, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 428938, "num_examples": 2970, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 154148, "num_examples": 1000, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/train_text.txt": {"num_bytes": 1133852, "checksum": "6572bb3a42143128a5dfa99af8debeb0668e637c34b2d1e3140dac47316fe2c2"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/train_labels.txt": {"num_bytes": 18000, "checksum": "4e8fde025a453a25c94632794254131dedeac4e57228ad64157c41571cc88f71"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/test_text.txt": {"num_bytes": 399242, "checksum": "bc4762876a8dd8baa55c3cd7b03108e3231a5d691e80b8b1ef97c5be31b9da9a"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/test_labels.txt": {"num_bytes": 5940, "checksum": "c14adca6b3627616a835c5ccea8a1cceb0235cd79417257f093eb0e16a69c62f"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/val_text.txt": {"num_bytes": 144174, "checksum": "1ff78b1ed4c5ce43284b9eba32eb7d60c6d45d0d1b3b4d6df456ae01640764f1"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/hate/val_labels.txt": {"num_bytes": 2000, "checksum": "5092badf1a0e70036ea6264bcd0b78afc07d0f4a512fa6af34c2c4973600656b"}}, "download_size": 1703208, "post_processing_size": null, "dataset_size": 1806740, "size_in_bytes": 3509948}, "irony": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["non_irony", "irony"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "irony", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 259191, "num_examples": 2862, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 75901, "num_examples": 784, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 86021, "num_examples": 955, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/train_text.txt": {"num_bytes": 231594, "checksum": "a888125a44f7dfaa25b026318748d0e62cc9a300d20f66eafd62011a19eaea23"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/train_labels.txt": {"num_bytes": 5724, "checksum": "fc69e6106c0f1f433a91536e08f83c71a391d7b219f7684d42f243a8089af77d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/test_text.txt": {"num_bytes": 68057, "checksum": "53103da934a7308eee82f05f2a9781a8ea3e88604fdc1e02d3101108505c64be"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/test_labels.txt": {"num_bytes": 1568, "checksum": "08e2095e1725e74907a380614c220204e356bb46e3e8c93deb74e83e5b15ab38"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/val_text.txt": {"num_bytes": 76760, "checksum": "8806cf3793e300a485cfae34892fc3a0a2f9a183deb06c750c6531515c83051e"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/irony/val_labels.txt": {"num_bytes": 1910, "checksum": "ccf429f63b4e8d0e7f425ca09445f7c31f7cea8a1b7c283b015b117c4002fd07"}}, "download_size": 385613, "post_processing_size": null, "dataset_size": 421113, "size_in_bytes": 806726}, "offensive": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["non-offensive", "offensive"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "offensive", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1648069, "num_examples": 11916, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 135477, "num_examples": 860, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 192421, "num_examples": 1324, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/train_text.txt": {"num_bytes": 1529074, "checksum": "78a7a32e38b10af7d8970b008bf17f661c8d0a90dad145fa0fa6a944669650db"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/train_labels.txt": {"num_bytes": 23832, "checksum": "c0b7d6ebdaa4ebcf6fc557ef1e775d92eda160218a0e3b1dd48eb8234dc892a6"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/test_text.txt": {"num_bytes": 126921, "checksum": "25b08c3333c26190f1023961c4508ec9aab24d4722b1a3ea7a6040724c120547"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/test_labels.txt": {"num_bytes": 1720, "checksum": "41d05a7aa0b01f5dafab21b95adb4f979cb4226c046ff315702774d10dac1605"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/val_text.txt": {"num_bytes": 179188, "checksum": "816f36d180c35f15a5104838cb73856a0bef42043482fe738f3481b06242a55c"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/offensive/val_labels.txt": {"num_bytes": 2648, "checksum": "ed2deb776bd1c52fb8221fadd3360e32d9dfe46842d78053528126e46363a258"}}, "download_size": 1863383, "post_processing_size": null, "dataset_size": 1975967, "size_in_bytes": 3839350}, "sentiment": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["negative", "neutral", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "sentiment", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5425142, "num_examples": 45615, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 1279548, "num_examples": 12284, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 239088, "num_examples": 2000, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/train_text.txt": {"num_bytes": 4970029, "checksum": "368f01052ea6fd8ffc408a2a2e6ac9669e31542581a0396ef16591ea26eb98a6"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/train_labels.txt": {"num_bytes": 91230, "checksum": "122bfb1732fb6995b0e5c5f726c0ba457c469c3b6e60513007ce5037f23e65d4"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/test_text.txt": {"num_bytes": 1156877, "checksum": "09a93a55c63fd93f97485ef7302889d7edb4091cd49733aa37da094f0bfa0675"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/test_labels.txt": {"num_bytes": 24568, "checksum": "6afb4afe9374d1f983bcf9a7c79b108d0f37fdf020a83f30488309bed215db9d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/val_text.txt": {"num_bytes": 219137, "checksum": "e5b021e6fc45064c260b09814b803d8f56cada519c4d952d72f43d48a350a964"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/sentiment/val_labels.txt": {"num_bytes": 4000, "checksum": "b4566926c72e2e4e2916c864def94e76c4cdde52446af2c7ba4fc2006e057e51"}}, "download_size": 6465841, "post_processing_size": null, "dataset_size": 6943778, "size_in_bytes": 13409619}, "stance_abortion": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_abortion", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 68698, "num_examples": 587, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 33175, "num_examples": 280, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 7661, "num_examples": 66, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/train_text.txt": {"num_bytes": 62828, "checksum": "a421d5b8fd9f972970b9275b83f65745bf81986d2a412b4caa2ba071f3efa916"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/train_labels.txt": {"num_bytes": 1174, "checksum": "e6786a594bd9a083c524a0f420c690351140b52af288f487cb4772d29675b014"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/test_text.txt": {"num_bytes": 30371, "checksum": "bf0e16a0b8ca4cf0ab90efbc560db3151c288fc842f5e3c6554e8589d521556a"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/test_labels.txt": {"num_bytes": 560, "checksum": "c90e6d36d863f876d6661620d37b613b4b07858a5277c8d6623713ee59ca451c"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/val_text.txt": {"num_bytes": 6997, "checksum": "0428ab3f2894936f2445a9020763c2bd19ed42986872168bb65886dede5843fd"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/abortion/val_labels.txt": {"num_bytes": 132, "checksum": "8df57a50823d5f3683ecf75d824a42e3b08eb52e25e3e2d6928f523097a0c050"}}, "download_size": 102062, "post_processing_size": null, "dataset_size": 109534, "size_in_bytes": 211596}, "stance_atheism": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_atheism", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 54779, "num_examples": 461, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 25720, "num_examples": 220, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 6324, "num_examples": 52, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/train_text.txt": {"num_bytes": 50165, "checksum": "0e82f1d4a16d79a38a68aee761762cf8a846bc8f7f9395670ca44e2ecf2f58f7"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/train_labels.txt": {"num_bytes": 922, "checksum": "a764aac1a75ccb32c4ffc4c03c66dc365cb50f013d3e94549bf775636cbc8373"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/test_text.txt": {"num_bytes": 23516, "checksum": "16c5336b2cba606ca63a6afcc50241be63a8fccf021628c6505449439b9d54b3"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/test_labels.txt": {"num_bytes": 440, "checksum": "4ef7c9398d265cfac625092c834e43cef9da9cb318e563493abb64f65dfe1b52"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/val_text.txt": {"num_bytes": 5800, "checksum": "5fe14c4c01f87a45dba640dddbb1d1909a893f9565f159c48fa1ba35bb46c209"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/atheism/val_labels.txt": {"num_bytes": 104, "checksum": "638095b3582f927fd1481cdb8d1f9f670f8d27880baf32c0b26c5946fd8f8292"}}, "download_size": 80947, "post_processing_size": null, "dataset_size": 86823, "size_in_bytes": 167770}, "stance_climate": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_climate", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 40253, "num_examples": 355, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 19929, "num_examples": 169, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 4805, "num_examples": 40, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_text.txt": {"num_bytes": 36699, "checksum": "4803211832d318026323a8e5014cff1b95e1c8c3854378101e5d1a8c82582eb7"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/train_labels.txt": {"num_bytes": 710, "checksum": "d6274f55bc95f5a7f2ae591b886c1414a7664aaf4e0c609f4ba6cf377929af18"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_text.txt": {"num_bytes": 18235, "checksum": "41ee8ee2ad3c36e0629654fdb271f37775197c79be8b299adbeadd2003b63c53"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/test_labels.txt": {"num_bytes": 338, "checksum": "193c9f2358f61d9efe558324ec89ecaf08e600a44b68128f47838c01d9f98dfd"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_text.txt": {"num_bytes": 4401, "checksum": "fc5714703add266801ee2fd98296ea20ec0879e89cdb9f906d9812d9f640f2ba"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/climate/val_labels.txt": {"num_bytes": 80, "checksum": "0cb133ab9b137292f075210db45f7e293dc52798a4e21e59037bfcfe66c97aa6"}}, "download_size": 60463, "post_processing_size": null, "dataset_size": 64987, "size_in_bytes": 125450}, "stance_feminist": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_feminist", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 70513, "num_examples": 597, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 33309, "num_examples": 285, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 8039, "num_examples": 67, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_text.txt": {"num_bytes": 64539, "checksum": "c176e6663973c8e78bfa92ba1e8874a70cc5358567d71584a90943bc6525eaab"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/train_labels.txt": {"num_bytes": 1194, "checksum": "abd4f196d801423bb0daba8c0ecf5b3efba1f10e8f410c3dfa360b50c8b9c685"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_text.txt": {"num_bytes": 30455, "checksum": "1bfdbdc2af64fd62dcc775d1288e192ac8ff805ef27ccf3aaac54a98616eefda"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/test_labels.txt": {"num_bytes": 570, "checksum": "ddbde6d253ee47c5d5ef8bc5386270fde45cf088d3be70bba9c382b8a024897a"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_text.txt": {"num_bytes": 7365, "checksum": "3518b2ddcf696626a7243d7cea720a975718c7a52a5a086931be87897c1de58b"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/feminist/val_labels.txt": {"num_bytes": 134, "checksum": "399e0d468d0e4ead7a445f69efdf35876c835acf4cefc00a16f451a5d42e5c13"}}, "download_size": 104257, "post_processing_size": null, "dataset_size": 111861, "size_in_bytes": 216118}, "stance_hillary": {"description": "TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. 
All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.\n", "citation": "@inproceedings{barbieri2020tweeteval,\n title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},\n author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},\n booktitle={Proceedings of Findings of EMNLP},\n year={2020}\n}\n", "homepage": "https://github.com/cardiffnlp/tweeteval", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["none", "against", "favor"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": " tweet_eval", "config_name": "stance_hillary", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 69600, "num_examples": 620, "dataset_name": " tweet_eval"}, "test": {"name": "test", "num_bytes": 34491, "num_examples": 295, "dataset_name": " tweet_eval"}, "validation": {"name": "validation", "num_bytes": 7536, "num_examples": 69, "dataset_name": " tweet_eval"}}, "download_checksums": {"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_text.txt": {"num_bytes": 63398, "checksum": "0bd735de895cb74d63c224e64e3d955cac99be97aa225f803fe4d2f5978a2c99"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/train_labels.txt": {"num_bytes": 1240, "checksum": "0ea5753d13a717a9e91581d1d89c0b5206c8f905f0a717b2b27d02dbf419250d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_text.txt": {"num_bytes": 31537, "checksum": "5c4e020285a62cfd88f264849e1db242ded356c171b1a68dd0050b76635053aa"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/test_labels.txt": {"num_bytes": 590, "checksum": "068468f6a72b85dfb65bf10e45f2453fa082d1ea9d7a40e7f560d5b6d75027f3"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_text.txt": {"num_bytes": 6842, "checksum": "9714b7dcc8617e095433d7b63df8aa155eb84216b9ac9195105ab83d85cd248d"}, "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/stance/hillary/val_labels.txt": {"num_bytes": 138, "checksum": "e5d44c771b7349a4a74309f56ca072fdf8f1c015068d519ca2ed3a931c833606"}}, "download_size": 103745, "post_processing_size": null, "dataset_size": 111627, "size_in_bytes": 215372}}
 
emoji/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95e248466f9153ba8ee6f990f0ad26c7d77520326d77e697aaf7e09df350e65d
3
+ size 3047341
emoji/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4f587c86b7459b727ae1c84d39ca7fd42631e51d43cc3f95242dcd4c696077a
3
+ size 2609973
emoji/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48f3d0140d19153a1a14e2940d62b63dcdfb24c4b958c65bd9ea1dfb777f391a
3
+ size 281994
emotion/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:460be5e853577cad9e070549a3bb0eaecccfa277e07db69873fc8f46bd57299f
3
+ size 105421
emotion/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:448fb3d7b94184ca25652d7361d82bbc50c20cd0d10a984ce3596a1aa7e1d75c
3
+ size 233004
emotion/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20e78c12dc5a17c2011a708cc838b8b0251b87f624f71dbdd354a2e3857aab73
3
+ size 28591
hate/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c07de83b04e013b956c66187e1fd454fadb6f4caacc23e11f5fb3f80ddc2abd2
3
+ size 277846
hate/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da2c391c2050df2d90d3d9e7b8bca48a18056293138b5c6f08c7bf61c5b84668
3
+ size 815722
hate/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7307944af6e873feb693a9c0853439af8d0543e571ae8a2c4cfc91e01016c68
3
+ size 102778
irony/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6bcf4eae70637cacc9239773684a37cbfa3ef98fde5f173355651afee477862
3
+ size 53984
irony/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:541de8c14f53024660fa5ae1534489b51106c5f4905da294e47ded7e09d73ad0
3
+ size 182571
irony/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ac02a5943b02808f1998dfacba7fce89033be6b3e4bbb465a26faafabf21191
3
+ size 61092
offensive/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15b87adaf1850d8e3e104be6240255813d48d776940be546ad353fee9fcfb099
3
+ size 93731
offensive/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c2cb443cb5b956aa8578a6e18b2f9e50ce43217774e3cf171d53e16a1db81a3
3
+ size 1019132
offensive/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1161b4e6cc7cbdaa667d4fc131f9be0ea3db755616126329dda7eb521cd88c36
3
+ size 121665
sentiment/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f73aea52d43879223019e733b944780f7e9203ee32459ea006f554011379fcdf
3
+ size 900553
sentiment/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:884ed7990ba4058aa78372f5f9590db308c393820545d9c4f2363f5a2ea12fbf
3
+ size 3781982
sentiment/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba8e7513047d4991bcdcc0a22aaa70777e47b609f1b739da5117baf7893f3e56
3
+ size 167140
stance_abortion/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c25f9b438767cbc25f143beac32f7ade213baa5c436be32733e0b718d1140f6
3
+ size 22513
stance_abortion/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d90ce50ba5ed14d7600bc9ca919c2ac2b9f58141b80d63b96fa17027607bd20
3
+ size 43712
stance_abortion/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8ac5a9696577bb1622b12bec44ac8222f26f2109cb6a2c0abc27305fa93bf56
3
+ size 7292
stance_atheism/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b92549ee5a2d90448a1fac4ed509765e1b166abb91705e997a62a54e3606127c
3
+ size 19358
stance_atheism/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d915d95b5a313db91f3737d095aa89db6b9e749c7838e00fe844ff5a6f98661
3
+ size 36467
stance_atheism/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f10c1952169a56231bf2f4c9548e98843219311170fec4fe1fd4557cc1b85f9c
3
+ size 6440
stance_climate/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cda4111271a268e0d2513d8effa704395f59ce2b8ece90f93e4a96d2c16dc8b9
3
+ size 14897
stance_climate/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:248ba3695bba19f1fcef9c65421d81c65fa3a76570f1d7d3766872022e96b224
3
+ size 28127
stance_climate/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f4d174e3f9f7c526b532409fa4ca59abd27d9eb35c03df4daa2e9e64366f8c5
3
+ size 5469
stance_feminist/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2230a3e35acd62ace8d77aa6fd8731a724833d540f732a75fb6327f2cdf1e0ea
3
+ size 23416
stance_feminist/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6ae1f6ee44c1e0ac0523bab6278febf8a62387305c24aad3ea9701868b81e1a
3
+ size 45301
stance_feminist/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bc15f3d903ba99da50a200e211e1506c8117378310f2fd54edb69d7d5d70493
3
+ size 7628
stance_hillary/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62f0fcbf8356ce7df510365dae39c2928b7edf75e0576c09f5196f8483315d2a
3
+ size 23516
stance_hillary/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61b7730fdffe10728554b85b3d36af454df6185fcebf214697215965a28dc364
3
+ size 43297
stance_hillary/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:366e5594562feaadbfddf307b8d5940461fff70efd7654bcbee9dde0d8c32068
3
+ size 7244
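
Each of the entries above is a Git LFS pointer (spec version, sha256 oid, byte size), not the Parquet data itself. A sketch of reading one shard directly, assuming the repository id `tweet_eval` and using `huggingface_hub` plus `pyarrow`:

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Fetch one shard listed above and read it as an Arrow table.
path = hf_hub_download(
    repo_id="tweet_eval",
    filename="emotion/train-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.schema)    # text (string) and label (integer class id)
print(table.num_rows)  # 3257, matching num_examples for the emotion train split
```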
tweet_eval.py DELETED
@@ -1,249 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """The Tweet Eval Datasets"""
16
-
17
-
18
- import datasets
19
-
20
-
21
- _CITATION = """\
22
- @inproceedings{barbieri2020tweeteval,
23
- title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},
24
- author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},
25
- booktitle={Proceedings of Findings of EMNLP},
26
- year={2020}
27
- }
28
- """
29
-
30
- _DESCRIPTION = """\
31
- TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.
32
- """
33
-
34
- _HOMEPAGE = "https://github.com/cardiffnlp/tweeteval"
35
-
36
- _LICENSE = ""
37
-
38
- URL = "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/"
39
-
40
- _URLs = {
41
- "emoji": {
42
- "train_text": URL + "emoji/train_text.txt",
43
- "train_labels": URL + "emoji/train_labels.txt",
44
- "test_text": URL + "emoji/test_text.txt",
45
- "test_labels": URL + "emoji/test_labels.txt",
46
- "val_text": URL + "emoji/val_text.txt",
47
- "val_labels": URL + "emoji/val_labels.txt",
48
- },
49
- "emotion": {
50
- "train_text": URL + "emotion/train_text.txt",
51
- "train_labels": URL + "emotion/train_labels.txt",
52
- "test_text": URL + "emotion/test_text.txt",
53
- "test_labels": URL + "emotion/test_labels.txt",
54
- "val_text": URL + "emotion/val_text.txt",
55
- "val_labels": URL + "emotion/val_labels.txt",
56
- },
57
- "hate": {
58
- "train_text": URL + "hate/train_text.txt",
59
- "train_labels": URL + "hate/train_labels.txt",
60
- "test_text": URL + "hate/test_text.txt",
61
- "test_labels": URL + "hate/test_labels.txt",
62
- "val_text": URL + "hate/val_text.txt",
63
- "val_labels": URL + "hate/val_labels.txt",
64
- },
65
- "irony": {
66
- "train_text": URL + "irony/train_text.txt",
67
- "train_labels": URL + "irony/train_labels.txt",
68
- "test_text": URL + "irony/test_text.txt",
69
- "test_labels": URL + "irony/test_labels.txt",
70
- "val_text": URL + "irony/val_text.txt",
71
- "val_labels": URL + "irony/val_labels.txt",
72
- },
73
- "offensive": {
74
- "train_text": URL + "offensive/train_text.txt",
75
- "train_labels": URL + "offensive/train_labels.txt",
76
- "test_text": URL + "offensive/test_text.txt",
77
- "test_labels": URL + "offensive/test_labels.txt",
78
- "val_text": URL + "offensive/val_text.txt",
79
- "val_labels": URL + "offensive/val_labels.txt",
80
- },
81
- "sentiment": {
82
- "train_text": URL + "sentiment/train_text.txt",
83
- "train_labels": URL + "sentiment/train_labels.txt",
84
- "test_text": URL + "sentiment/test_text.txt",
85
- "test_labels": URL + "sentiment/test_labels.txt",
86
- "val_text": URL + "sentiment/val_text.txt",
87
- "val_labels": URL + "sentiment/val_labels.txt",
88
- },
89
- "stance": {
90
- "abortion": {
91
- "train_text": URL + "stance/abortion/train_text.txt",
92
- "train_labels": URL + "stance/abortion/train_labels.txt",
93
- "test_text": URL + "stance/abortion/test_text.txt",
94
- "test_labels": URL + "stance/abortion/test_labels.txt",
95
- "val_text": URL + "stance/abortion/val_text.txt",
96
- "val_labels": URL + "stance/abortion/val_labels.txt",
97
- },
98
- "atheism": {
99
- "train_text": URL + "stance/atheism/train_text.txt",
100
- "train_labels": URL + "stance/atheism/train_labels.txt",
101
- "test_text": URL + "stance/atheism/test_text.txt",
102
- "test_labels": URL + "stance/atheism/test_labels.txt",
103
- "val_text": URL + "stance/atheism/val_text.txt",
104
- "val_labels": URL + "stance/atheism/val_labels.txt",
105
- },
106
- "climate": {
107
- "train_text": URL + "stance/climate/train_text.txt",
108
- "train_labels": URL + "stance/climate/train_labels.txt",
109
- "test_text": URL + "stance/climate/test_text.txt",
110
- "test_labels": URL + "stance/climate/test_labels.txt",
111
- "val_text": URL + "stance/climate/val_text.txt",
112
- "val_labels": URL + "stance/climate/val_labels.txt",
113
- },
114
- "feminist": {
115
- "train_text": URL + "stance/feminist/train_text.txt",
116
- "train_labels": URL + "stance/feminist/train_labels.txt",
117
- "test_text": URL + "stance/feminist/test_text.txt",
118
- "test_labels": URL + "stance/feminist/test_labels.txt",
119
- "val_text": URL + "stance/feminist/val_text.txt",
120
- "val_labels": URL + "stance/feminist/val_labels.txt",
121
- },
122
- "hillary": {
123
- "train_text": URL + "stance/hillary/train_text.txt",
124
- "train_labels": URL + "stance/hillary/train_labels.txt",
125
- "test_text": URL + "stance/hillary/test_text.txt",
126
- "test_labels": URL + "stance/hillary/test_labels.txt",
127
- "val_text": URL + "stance/hillary/val_text.txt",
128
- "val_labels": URL + "stance/hillary/val_labels.txt",
129
- },
130
- },
131
- }
132
-
133
-
134
- class TweetEvalConfig(datasets.BuilderConfig):
135
- def __init__(self, *args, type=None, sub_type=None, **kwargs):
136
- super().__init__(
137
- *args,
138
- name=f"{type}" if type != "stance" else f"{type}_{sub_type}",
139
- **kwargs,
140
- )
141
- self.type = type
142
- self.sub_type = sub_type
143
-
144
-
145
- class TweetEval(datasets.GeneratorBasedBuilder):
146
- """TweetEval Dataset."""
147
-
148
- BUILDER_CONFIGS = [
149
- TweetEvalConfig(
150
- type=key,
151
- sub_type=None,
152
- version=datasets.Version("1.1.0"),
153
- description=f"This part of my dataset covers {key} part of TweetEval Dataset.",
154
- )
155
- for key in list(_URLs.keys())
156
- if key != "stance"
157
- ] + [
158
- TweetEvalConfig(
159
- type="stance",
160
- sub_type=key,
161
- version=datasets.Version("1.1.0"),
162
- description=f"This part of my dataset covers stance_{key} part of TweetEval Dataset.",
163
- )
164
- for key in list(_URLs["stance"].keys())
165
- ]
166
-
167
- def _info(self):
168
- if self.config.type == "stance":
169
- names = ["none", "against", "favor"]
170
- elif self.config.type == "sentiment":
171
- names = ["negative", "neutral", "positive"]
172
- elif self.config.type == "offensive":
173
- names = ["non-offensive", "offensive"]
174
- elif self.config.type == "irony":
175
- names = ["non_irony", "irony"]
176
- elif self.config.type == "hate":
177
- names = ["non-hate", "hate"]
178
- elif self.config.type == "emoji":
179
- names = [
180
- "❤",
181
- "😍",
182
- "😂",
183
- "💕",
184
- "🔥",
185
- "😊",
186
- "😎",
187
- "✨",
188
- "💙",
189
- "😘",
190
- "📷",
191
- "🇺🇸",
192
- "☀",
193
- "💜",
194
- "😉",
195
- "💯",
196
- "😁",
197
- "🎄",
198
- "📸",
199
- "😜",
200
- ]
201
-
202
- else:
203
- names = ["anger", "joy", "optimism", "sadness"]
204
-
205
- return datasets.DatasetInfo(
206
- description=_DESCRIPTION,
207
- features=datasets.Features(
208
- {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=names)}
209
- ),
210
- supervised_keys=None,
211
- homepage=_HOMEPAGE,
212
- license=_LICENSE,
213
- citation=_CITATION,
214
- )
215
-
216
- def _split_generators(self, dl_manager):
217
- """Returns SplitGenerators."""
218
- if self.config.type != "stance":
219
- my_urls = _URLs[self.config.type]
220
- else:
221
- my_urls = _URLs[self.config.type][self.config.sub_type]
222
- data_dir = dl_manager.download_and_extract(my_urls)
223
- return [
224
- datasets.SplitGenerator(
225
- name=datasets.Split.TRAIN,
226
- # These kwargs will be passed to _generate_examples
227
- gen_kwargs={"text_path": data_dir["train_text"], "labels_path": data_dir["train_labels"]},
228
- ),
229
- datasets.SplitGenerator(
230
- name=datasets.Split.TEST,
231
- # These kwargs will be passed to _generate_examples
232
- gen_kwargs={"text_path": data_dir["test_text"], "labels_path": data_dir["test_labels"]},
233
- ),
234
- datasets.SplitGenerator(
235
- name=datasets.Split.VALIDATION,
236
- # These kwargs will be passed to _generate_examples
237
- gen_kwargs={"text_path": data_dir["val_text"], "labels_path": data_dir["val_labels"]},
238
- ),
239
- ]
240
-
241
- def _generate_examples(self, text_path, labels_path):
242
- """Yields examples."""
243
-
244
- with open(text_path, encoding="utf-8") as f:
245
- texts = f.readlines()
246
- with open(labels_path, encoding="utf-8") as f:
247
- labels = f.readlines()
248
- for i, text in enumerate(texts):
249
- yield i, {"text": text.strip(), "label": int(labels[i].strip())}