albertvillanova HF staff commited on
Commit
9bc9802
1 Parent(s): 47fe1be

Convert dataset to Parquet (#7)

Browse files

- Convert dataset to Parquet (4e139d217cf2d8e2b6b9cd15c68ccef954dcbc63)
- Add tnews data files (1bb29781aa9e9c6633dbbe674475cd901dcbe9e2)
- Add iflytek data files (5cc2740fa5325623a081f591332df3f4117b9b78)
- Add cmnli data files (ba18cf430d406bf6477448b59e15b7df1ff2ae91)
- Add cluewsc2020 data files (eb98877fe97052a2059f135ce951929d2626bb07)
- Add csl data files (477462bdfb996730654126fdce0d0714fce0c2bd)
- Add cmrc2018 data files (181729ba10a087174663e33e6e0278ef0fe0c210)
- Add drcd data files (e098cf9214d3ff58c6042f915044eaf27e1b0710)
- Add chid data files (0393f563a2baca984e9da3bb83a454ae2e5ebfd0)
- Add c3 data files (00245e345061a9ef7e38ecb67e72190bb588e32b)
- Add ocnli data files (2ac28faeeb78dee2c8726edb9199af7eefa94201)
- Add diagnostics data files (5c66abf19dee2b0c0fa02af00252350edcbf5075)
- Delete loading script (6fef312b4fc0327cfe6892eb2b6833de78f0ebc0)
- Delete legacy dataset_infos.json (7a92286c90a77a495dcaeba0fe34939053ef7dc5)

Files changed (38) hide show
  1. README.md +339 -244
  2. afqmc/test-00000-of-00001.parquet +3 -0
  3. afqmc/train-00000-of-00001.parquet +3 -0
  4. afqmc/validation-00000-of-00001.parquet +3 -0
  5. c3/test-00000-of-00001.parquet +3 -0
  6. c3/train-00000-of-00001.parquet +3 -0
  7. c3/validation-00000-of-00001.parquet +3 -0
  8. chid/test-00000-of-00001.parquet +3 -0
  9. chid/train-00000-of-00001.parquet +3 -0
  10. chid/validation-00000-of-00001.parquet +3 -0
  11. clue.py +0 -570
  12. cluewsc2020/test-00000-of-00001.parquet +3 -0
  13. cluewsc2020/train-00000-of-00001.parquet +3 -0
  14. cluewsc2020/validation-00000-of-00001.parquet +3 -0
  15. cmnli/test-00000-of-00001.parquet +3 -0
  16. cmnli/train-00000-of-00001.parquet +3 -0
  17. cmnli/validation-00000-of-00001.parquet +3 -0
  18. cmrc2018/test-00000-of-00001.parquet +3 -0
  19. cmrc2018/train-00000-of-00001.parquet +3 -0
  20. cmrc2018/trial-00000-of-00001.parquet +3 -0
  21. cmrc2018/validation-00000-of-00001.parquet +3 -0
  22. csl/test-00000-of-00001.parquet +3 -0
  23. csl/train-00000-of-00001.parquet +3 -0
  24. csl/validation-00000-of-00001.parquet +3 -0
  25. dataset_infos.json +0 -1
  26. diagnostics/test-00000-of-00001.parquet +3 -0
  27. drcd/test-00000-of-00001.parquet +3 -0
  28. drcd/train-00000-of-00001.parquet +3 -0
  29. drcd/validation-00000-of-00001.parquet +3 -0
  30. iflytek/test-00000-of-00001.parquet +3 -0
  31. iflytek/train-00000-of-00001.parquet +3 -0
  32. iflytek/validation-00000-of-00001.parquet +3 -0
  33. ocnli/test-00000-of-00001.parquet +3 -0
  34. ocnli/train-00000-of-00001.parquet +3 -0
  35. ocnli/validation-00000-of-00001.parquet +3 -0
  36. tnews/test-00000-of-00001.parquet +3 -0
  37. tnews/train-00000-of-00001.parquet +3 -0
  38. tnews/validation-00000-of-00001.parquet +3 -0
README.md CHANGED
@@ -43,53 +43,231 @@ dataset_info:
43
  dtype: int32
44
  splits:
45
  - name: test
46
- num_bytes: 378726
47
  num_examples: 3861
48
  - name: train
49
- num_bytes: 3396535
50
  num_examples: 34334
51
  - name: validation
52
- num_bytes: 426293
53
  num_examples: 4316
54
- download_size: 1195044
55
- dataset_size: 4201554
56
- - config_name: tnews
57
  features:
58
- - name: sentence
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  dtype: string
60
  - name: label
61
  dtype:
62
  class_label:
63
  names:
64
- '0': '100'
65
- '1': '101'
66
- '2': '102'
67
- '3': '103'
68
- '4': '104'
69
- '5': '106'
70
- '6': '107'
71
- '7': '108'
72
- '8': '109'
73
- '9': '110'
74
- '10': '112'
75
- '11': '113'
76
- '12': '114'
77
- '13': '115'
78
- '14': '116'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  - name: idx
80
  dtype: int32
81
  splits:
82
  - name: test
83
- num_bytes: 810974
84
- num_examples: 10000
85
  - name: train
86
- num_bytes: 4245701
87
- num_examples: 53360
88
  - name: validation
89
- num_bytes: 797926
90
- num_examples: 10000
91
- download_size: 5123575
92
- dataset_size: 5854601
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  - config_name: iflytek
94
  features:
95
  - name: sentence
@@ -221,17 +399,17 @@ dataset_info:
221
  dtype: int32
222
  splits:
223
  - name: test
224
- num_bytes: 2105688
225
  num_examples: 2600
226
  - name: train
227
- num_bytes: 10028613
228
  num_examples: 12133
229
  - name: validation
230
- num_bytes: 2157123
231
  num_examples: 2599
232
- download_size: 6505938
233
- dataset_size: 14291424
234
- - config_name: cmnli
235
  features:
236
  - name: sentence1
237
  dtype: string
@@ -248,231 +426,148 @@ dataset_info:
248
  dtype: int32
249
  splits:
250
  - name: test
251
- num_bytes: 2386837
252
- num_examples: 13880
253
  - name: train
254
- num_bytes: 67685309
255
- num_examples: 391783
256
  - name: validation
257
- num_bytes: 2051845
258
- num_examples: 12241
259
- download_size: 31404066
260
- dataset_size: 72123991
261
- - config_name: cluewsc2020
262
  features:
263
- - name: idx
264
- dtype: int32
265
- - name: text
266
  dtype: string
267
  - name: label
268
  dtype:
269
  class_label:
270
  names:
271
- '0': 'true'
272
- '1': 'false'
273
- - name: target
274
- struct:
275
- - name: span1_text
276
- dtype: string
277
- - name: span2_text
278
- dtype: string
279
- - name: span1_index
280
- dtype: int32
281
- - name: span2_index
282
- dtype: int32
283
- splits:
284
- - name: test
285
- num_bytes: 645649
286
- num_examples: 2574
287
- - name: train
288
- num_bytes: 288828
289
- num_examples: 1244
290
- - name: validation
291
- num_bytes: 72682
292
- num_examples: 304
293
- download_size: 281384
294
- dataset_size: 1007159
295
- - config_name: csl
296
- features:
297
  - name: idx
298
  dtype: int32
299
- - name: corpus_id
300
- dtype: int32
301
- - name: abst
302
- dtype: string
303
- - name: label
304
- dtype:
305
- class_label:
306
- names:
307
- '0': '0'
308
- '1': '1'
309
- - name: keyword
310
- sequence: string
311
  splits:
312
  - name: test
313
- num_bytes: 2463740
314
- num_examples: 3000
315
  - name: train
316
- num_bytes: 16478914
317
- num_examples: 20000
318
  - name: validation
319
- num_bytes: 2464575
320
- num_examples: 3000
321
- download_size: 3234594
322
- dataset_size: 21407229
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
323
  - config_name: cmrc2018
324
- features:
325
- - name: id
326
- dtype: string
327
- - name: context
328
- dtype: string
329
- - name: question
330
- dtype: string
331
- - name: answers
332
- sequence:
333
- - name: text
334
- dtype: string
335
- - name: answer_start
336
- dtype: int32
337
- splits:
338
- - name: test
339
- num_bytes: 3112066
340
- num_examples: 2000
341
- - name: train
342
- num_bytes: 15508110
343
- num_examples: 10142
344
- - name: validation
345
- num_bytes: 5183809
346
- num_examples: 3219
347
- - name: trial
348
- num_bytes: 1606931
349
- num_examples: 1002
350
- download_size: 3405146
351
- dataset_size: 25410916
352
  - config_name: drcd
353
- features:
354
- - name: id
355
- dtype: string
356
- - name: context
357
- dtype: string
358
- - name: question
359
- dtype: string
360
- - name: answers
361
- sequence:
362
- - name: text
363
- dtype: string
364
- - name: answer_start
365
- dtype: int32
366
- splits:
367
- - name: test
368
- num_bytes: 4982402
369
- num_examples: 3493
370
- - name: train
371
- num_bytes: 37443458
372
- num_examples: 26936
373
- - name: validation
374
- num_bytes: 5222753
375
- num_examples: 3524
376
- download_size: 7264200
377
- dataset_size: 47648613
378
- - config_name: chid
379
- features:
380
- - name: idx
381
- dtype: int32
382
- - name: candidates
383
- sequence: string
384
- - name: content
385
- sequence: string
386
- - name: answers
387
- sequence:
388
- - name: text
389
- dtype: string
390
- - name: candidate_id
391
- dtype: int32
392
- splits:
393
- - name: test
394
- num_bytes: 11480463
395
- num_examples: 3447
396
- - name: train
397
- num_bytes: 252478178
398
- num_examples: 84709
399
- - name: validation
400
- num_bytes: 10117789
401
- num_examples: 3218
402
- download_size: 139199202
403
- dataset_size: 274076430
404
- - config_name: c3
405
- features:
406
- - name: id
407
- dtype: int32
408
- - name: context
409
- sequence: string
410
- - name: question
411
- dtype: string
412
- - name: choice
413
- sequence: string
414
- - name: answer
415
- dtype: string
416
- splits:
417
- - name: test
418
- num_bytes: 1600166
419
- num_examples: 1625
420
- - name: train
421
- num_bytes: 9672787
422
- num_examples: 11869
423
- - name: validation
424
- num_bytes: 2990967
425
- num_examples: 3816
426
- download_size: 3495930
427
- dataset_size: 14263920
428
  - config_name: ocnli
429
- features:
430
- - name: sentence1
431
- dtype: string
432
- - name: sentence2
433
- dtype: string
434
- - name: label
435
- dtype:
436
- class_label:
437
- names:
438
- '0': neutral
439
- '1': entailment
440
- '2': contradiction
441
- - name: idx
442
- dtype: int32
443
- splits:
444
- - name: test
445
- num_bytes: 376066
446
- num_examples: 3000
447
- - name: train
448
- num_bytes: 6187190
449
- num_examples: 50437
450
- - name: validation
451
- num_bytes: 366235
452
- num_examples: 2950
453
- download_size: 4359754
454
- dataset_size: 6929491
455
- - config_name: diagnostics
456
- features:
457
- - name: sentence1
458
- dtype: string
459
- - name: sentence2
460
- dtype: string
461
- - name: label
462
- dtype:
463
- class_label:
464
- names:
465
- '0': neutral
466
- '1': entailment
467
- '2': contradiction
468
- - name: idx
469
- dtype: int32
470
- splits:
471
- - name: test
472
- num_bytes: 42400
473
- num_examples: 514
474
- download_size: 12062
475
- dataset_size: 42400
476
  ---
477
 
478
  # Dataset Card for "clue"
43
  dtype: int32
44
  splits:
45
  - name: test
46
+ num_bytes: 378718
47
  num_examples: 3861
48
  - name: train
49
+ num_bytes: 3396503
50
  num_examples: 34334
51
  - name: validation
52
+ num_bytes: 426285
53
  num_examples: 4316
54
+ download_size: 2337418
55
+ dataset_size: 4201506
56
+ - config_name: c3
57
  features:
58
+ - name: id
59
+ dtype: int32
60
+ - name: context
61
+ sequence: string
62
+ - name: question
63
+ dtype: string
64
+ - name: choice
65
+ sequence: string
66
+ - name: answer
67
+ dtype: string
68
+ splits:
69
+ - name: test
70
+ num_bytes: 1600142
71
+ num_examples: 1625
72
+ - name: train
73
+ num_bytes: 9672739
74
+ num_examples: 11869
75
+ - name: validation
76
+ num_bytes: 2990943
77
+ num_examples: 3816
78
+ download_size: 4718960
79
+ dataset_size: 14263824
80
+ - config_name: chid
81
+ features:
82
+ - name: idx
83
+ dtype: int32
84
+ - name: candidates
85
+ sequence: string
86
+ - name: content
87
+ sequence: string
88
+ - name: answers
89
+ sequence:
90
+ - name: text
91
+ dtype: string
92
+ - name: candidate_id
93
+ dtype: int32
94
+ splits:
95
+ - name: test
96
+ num_bytes: 11480435
97
+ num_examples: 3447
98
+ - name: train
99
+ num_bytes: 252477926
100
+ num_examples: 84709
101
+ - name: validation
102
+ num_bytes: 10117761
103
+ num_examples: 3218
104
+ download_size: 198468807
105
+ dataset_size: 274076122
106
+ - config_name: cluewsc2020
107
+ features:
108
+ - name: idx
109
+ dtype: int32
110
+ - name: text
111
  dtype: string
112
  - name: label
113
  dtype:
114
  class_label:
115
  names:
116
+ '0': 'true'
117
+ '1': 'false'
118
+ - name: target
119
+ struct:
120
+ - name: span1_text
121
+ dtype: string
122
+ - name: span2_text
123
+ dtype: string
124
+ - name: span1_index
125
+ dtype: int32
126
+ - name: span2_index
127
+ dtype: int32
128
+ splits:
129
+ - name: test
130
+ num_bytes: 645637
131
+ num_examples: 2574
132
+ - name: train
133
+ num_bytes: 288816
134
+ num_examples: 1244
135
+ - name: validation
136
+ num_bytes: 72670
137
+ num_examples: 304
138
+ download_size: 380611
139
+ dataset_size: 1007123
140
+ - config_name: cmnli
141
+ features:
142
+ - name: sentence1
143
+ dtype: string
144
+ - name: sentence2
145
+ dtype: string
146
+ - name: label
147
+ dtype:
148
+ class_label:
149
+ names:
150
+ '0': neutral
151
+ '1': entailment
152
+ '2': contradiction
153
  - name: idx
154
  dtype: int32
155
  splits:
156
  - name: test
157
+ num_bytes: 2386821
158
+ num_examples: 13880
159
  - name: train
160
+ num_bytes: 67684989
161
+ num_examples: 391783
162
  - name: validation
163
+ num_bytes: 2051829
164
+ num_examples: 12241
165
+ download_size: 54234919
166
+ dataset_size: 72123639
167
+ - config_name: cmrc2018
168
+ features:
169
+ - name: id
170
+ dtype: string
171
+ - name: context
172
+ dtype: string
173
+ - name: question
174
+ dtype: string
175
+ - name: answers
176
+ sequence:
177
+ - name: text
178
+ dtype: string
179
+ - name: answer_start
180
+ dtype: int32
181
+ splits:
182
+ - name: test
183
+ num_bytes: 3112042
184
+ num_examples: 2000
185
+ - name: train
186
+ num_bytes: 15508062
187
+ num_examples: 10142
188
+ - name: validation
189
+ num_bytes: 5183785
190
+ num_examples: 3219
191
+ - name: trial
192
+ num_bytes: 1606907
193
+ num_examples: 1002
194
+ download_size: 5459001
195
+ dataset_size: 25410796
196
+ - config_name: csl
197
+ features:
198
+ - name: idx
199
+ dtype: int32
200
+ - name: corpus_id
201
+ dtype: int32
202
+ - name: abst
203
+ dtype: string
204
+ - name: label
205
+ dtype:
206
+ class_label:
207
+ names:
208
+ '0': '0'
209
+ '1': '1'
210
+ - name: keyword
211
+ sequence: string
212
+ splits:
213
+ - name: test
214
+ num_bytes: 2463728
215
+ num_examples: 3000
216
+ - name: train
217
+ num_bytes: 16478890
218
+ num_examples: 20000
219
+ - name: validation
220
+ num_bytes: 2464563
221
+ num_examples: 3000
222
+ download_size: 3936111
223
+ dataset_size: 21407181
224
+ - config_name: diagnostics
225
+ features:
226
+ - name: sentence1
227
+ dtype: string
228
+ - name: sentence2
229
+ dtype: string
230
+ - name: label
231
+ dtype:
232
+ class_label:
233
+ names:
234
+ '0': neutral
235
+ '1': entailment
236
+ '2': contradiction
237
+ - name: idx
238
+ dtype: int32
239
+ splits:
240
+ - name: test
241
+ num_bytes: 42392
242
+ num_examples: 514
243
+ download_size: 23000
244
+ dataset_size: 42392
245
+ - config_name: drcd
246
+ features:
247
+ - name: id
248
+ dtype: string
249
+ - name: context
250
+ dtype: string
251
+ - name: question
252
+ dtype: string
253
+ - name: answers
254
+ sequence:
255
+ - name: text
256
+ dtype: string
257
+ - name: answer_start
258
+ dtype: int32
259
+ splits:
260
+ - name: test
261
+ num_bytes: 4982378
262
+ num_examples: 3493
263
+ - name: train
264
+ num_bytes: 37443386
265
+ num_examples: 26936
266
+ - name: validation
267
+ num_bytes: 5222729
268
+ num_examples: 3524
269
+ download_size: 11188875
270
+ dataset_size: 47648493
271
  - config_name: iflytek
272
  features:
273
  - name: sentence
399
  dtype: int32
400
  splits:
401
  - name: test
402
+ num_bytes: 2105684
403
  num_examples: 2600
404
  - name: train
405
+ num_bytes: 10028605
406
  num_examples: 12133
407
  - name: validation
408
+ num_bytes: 2157119
409
  num_examples: 2599
410
+ download_size: 9777855
411
+ dataset_size: 14291408
412
+ - config_name: ocnli
413
  features:
414
  - name: sentence1
415
  dtype: string
426
  dtype: int32
427
  splits:
428
  - name: test
429
+ num_bytes: 376058
430
+ num_examples: 3000
431
  - name: train
432
+ num_bytes: 6187142
433
+ num_examples: 50437
434
  - name: validation
435
+ num_bytes: 366227
436
+ num_examples: 2950
437
+ download_size: 3000218
438
+ dataset_size: 6929427
439
+ - config_name: tnews
440
  features:
441
+ - name: sentence
 
 
442
  dtype: string
443
  - name: label
444
  dtype:
445
  class_label:
446
  names:
447
+ '0': '100'
448
+ '1': '101'
449
+ '2': '102'
450
+ '3': '103'
451
+ '4': '104'
452
+ '5': '106'
453
+ '6': '107'
454
+ '7': '108'
455
+ '8': '109'
456
+ '9': '110'
457
+ '10': '112'
458
+ '11': '113'
459
+ '12': '114'
460
+ '13': '115'
461
+ '14': '116'
 
 
 
 
 
 
 
 
 
 
 
462
  - name: idx
463
  dtype: int32
 
 
 
 
 
 
 
 
 
 
 
 
464
  splits:
465
  - name: test
466
+ num_bytes: 810970
467
+ num_examples: 10000
468
  - name: train
469
+ num_bytes: 4245677
470
+ num_examples: 53360
471
  - name: validation
472
+ num_bytes: 797922
473
+ num_examples: 10000
474
+ download_size: 4697843
475
+ dataset_size: 5854569
476
+ configs:
477
+ - config_name: afqmc
478
+ data_files:
479
+ - split: test
480
+ path: afqmc/test-*
481
+ - split: train
482
+ path: afqmc/train-*
483
+ - split: validation
484
+ path: afqmc/validation-*
485
+ - config_name: c3
486
+ data_files:
487
+ - split: test
488
+ path: c3/test-*
489
+ - split: train
490
+ path: c3/train-*
491
+ - split: validation
492
+ path: c3/validation-*
493
+ - config_name: chid
494
+ data_files:
495
+ - split: test
496
+ path: chid/test-*
497
+ - split: train
498
+ path: chid/train-*
499
+ - split: validation
500
+ path: chid/validation-*
501
+ - config_name: cluewsc2020
502
+ data_files:
503
+ - split: test
504
+ path: cluewsc2020/test-*
505
+ - split: train
506
+ path: cluewsc2020/train-*
507
+ - split: validation
508
+ path: cluewsc2020/validation-*
509
+ - config_name: cmnli
510
+ data_files:
511
+ - split: test
512
+ path: cmnli/test-*
513
+ - split: train
514
+ path: cmnli/train-*
515
+ - split: validation
516
+ path: cmnli/validation-*
517
  - config_name: cmrc2018
518
+ data_files:
519
+ - split: test
520
+ path: cmrc2018/test-*
521
+ - split: train
522
+ path: cmrc2018/train-*
523
+ - split: validation
524
+ path: cmrc2018/validation-*
525
+ - split: trial
526
+ path: cmrc2018/trial-*
527
+ - config_name: csl
528
+ data_files:
529
+ - split: test
530
+ path: csl/test-*
531
+ - split: train
532
+ path: csl/train-*
533
+ - split: validation
534
+ path: csl/validation-*
535
+ - config_name: diagnostics
536
+ data_files:
537
+ - split: test
538
+ path: diagnostics/test-*
 
 
 
 
 
 
 
539
  - config_name: drcd
540
+ data_files:
541
+ - split: test
542
+ path: drcd/test-*
543
+ - split: train
544
+ path: drcd/train-*
545
+ - split: validation
546
+ path: drcd/validation-*
547
+ - config_name: iflytek
548
+ data_files:
549
+ - split: test
550
+ path: iflytek/test-*
551
+ - split: train
552
+ path: iflytek/train-*
553
+ - split: validation
554
+ path: iflytek/validation-*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
555
  - config_name: ocnli
556
+ data_files:
557
+ - split: test
558
+ path: ocnli/test-*
559
+ - split: train
560
+ path: ocnli/train-*
561
+ - split: validation
562
+ path: ocnli/validation-*
563
+ - config_name: tnews
564
+ data_files:
565
+ - split: test
566
+ path: tnews/test-*
567
+ - split: train
568
+ path: tnews/train-*
569
+ - split: validation
570
+ path: tnews/validation-*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
571
  ---
572
 
573
  # Dataset Card for "clue"
afqmc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:820473142e26d9505dc6641204a3f30c73faecf745950fc4208033645984a395
3
+ size 211060
afqmc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:122bf3834b87d577b2443b8060480a9e6ebe12e6bddd014cf9910efa3e6fd4c0
3
+ size 1886804
afqmc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85af960b1ffce42d7340365786d606153ce2291b5e325093a9a78378d23108ac
3
+ size 239554
c3/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3fced6e7d3cae320c82aba9b6b1196b36da104277812d08b731c1c581a2a2f3
3
+ size 475638
c3/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03c0c92fcb9b88b127a2d57d674c1f246beebdaeba80c68728a7d1755b83c22a
3
+ size 3209206
c3/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0069ce2680aaad10d737c26d85b8a44b210d5195fa96d13e475d16117868f6fd
3
+ size 1034116
chid/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3c2fae53b49c583ea832c4fc334393d87b8ceca8329b2d724aa46d39baed08a
3
+ size 8693688
chid/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f28b2eb381d0e522b4b77e938712feca9524dd0e9e5f84bf1e35029002e9767e
3
+ size 182424589
chid/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41ac76386a396910ac99803634c4a80ed8bccd4e6c1e4795e0d965c889bfeb16
3
+ size 7350530
clue.py DELETED
@@ -1,570 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """A Chinese Language Understanding Evaluation Benchmark (CLUE) benchmark."""
18
-
19
-
20
- import json
21
- import os
22
- import re
23
- import textwrap
24
-
25
- import datasets
26
-
27
-
28
- _CLUE_CITATION = """\
29
- @misc{xu2020clue,
30
- title={CLUE: A Chinese Language Understanding Evaluation Benchmark},
31
- author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},
32
- year={2020},
33
- eprint={2004.05986},
34
- archivePrefix={arXiv},
35
- primaryClass={cs.CL}
36
- }
37
- """
38
-
39
- _CLUE_DESCRIPTION = """\
40
- CLUE, A Chinese Language Understanding Evaluation Benchmark
41
- (https://www.cluebenchmarks.com/) is a collection of resources for training,
42
- evaluating, and analyzing Chinese language understanding systems.
43
-
44
- """
45
-
46
-
47
- class ClueConfig(datasets.BuilderConfig):
48
- """BuilderConfig for CLUE."""
49
-
50
- def __init__(
51
- self,
52
- data_url,
53
- text_features=None,
54
- label_column=None,
55
- data_dir="",
56
- citation="",
57
- url="",
58
- label_classes=None,
59
- process_label=lambda x: x,
60
- **kwargs,
61
- ):
62
- """BuilderConfig for CLUE.
63
-
64
- Args:
65
- text_features: `dict[string, string]`, map from the name of the feature
66
- dict for each text field to the name of the column in the tsv file
67
- label_column: `string`, name of the column in the tsv file corresponding
68
- to the label
69
- data_url: `string`, url to download the zip file from
70
- data_dir: `string`, the path to the folder containing the tsv files in the
71
- downloaded zip
72
- citation: `string`, citation for the data set
73
- url: `string`, url for information about the data set
74
- label_classes: `list[string]`, the list of classes if the label is
75
- categorical. If not provided, then the label will be of type
76
- `datasets.Value('float32')`.
77
- process_label: `Function[string, any]`, function taking in the raw value
78
- of the label and processing it to the form required by the label feature
79
- **kwargs: keyword arguments forwarded to super.
80
- """
81
- super(ClueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
82
- self.text_features = text_features
83
- self.label_column = label_column
84
- self.label_classes = label_classes
85
- self.data_url = data_url
86
- self.data_dir = data_dir
87
- self.citation = citation
88
- self.url = url
89
- self.process_label = process_label
90
-
91
-
92
- class Clue(datasets.GeneratorBasedBuilder):
93
- """A Chinese Language Understanding Evaluation Benchmark (CLUE) benchmark."""
94
-
95
- BUILDER_CONFIGS = [
96
- ClueConfig(
97
- name="afqmc",
98
- description=textwrap.dedent(
99
- """\
100
- Ant Financial Question Matching Corpus is a dataset for Chinese
101
- question matching (similar to QQP).
102
- """
103
- ),
104
- text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
105
- label_classes=["0", "1"],
106
- label_column="label",
107
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/afqmc_public.zip",
108
- url="https://dc.cloud.alipay.com/index#/topic/data?id=8",
109
- ),
110
- ClueConfig(
111
- name="tnews",
112
- description=textwrap.dedent(
113
- """\
114
- Toutiao Short Text Classification for News is a dataset for Chinese
115
- short news classification.
116
- """
117
- ),
118
- text_features={"sentence": "sentence"},
119
- label_classes=[
120
- "100",
121
- "101",
122
- "102",
123
- "103",
124
- "104",
125
- "106",
126
- "107",
127
- "108",
128
- "109",
129
- "110",
130
- "112",
131
- "113",
132
- "114",
133
- "115",
134
- "116",
135
- ],
136
- label_column="label",
137
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/tnews_public.zip",
138
- url="https://github.com/skdjfla/toutiao-text-classfication-dataset",
139
- ),
140
- ClueConfig(
141
- name="iflytek",
142
- description=textwrap.dedent(
143
- """\
144
- IFLYTEK Long Text Classification for News is a dataset for Chinese
145
- long text classification. The text is crawled from an app market.
146
- """
147
- ),
148
- text_features={"sentence": "sentence"},
149
- label_classes=[str(label) for label in range(119)],
150
- label_column="label",
151
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/iflytek_public.zip",
152
- ),
153
- ClueConfig(
154
- name="cmnli",
155
- description=textwrap.dedent(
156
- """\
157
- Chinese Multi-Genre NLI is a dataset for Chinese Natural Language
158
- Inference. It consists of XNLI (Chinese subset) and translated MNLI.
159
- """
160
- ),
161
- text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
162
- label_classes=["neutral", "entailment", "contradiction"],
163
- label_column="label",
164
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/cmnli_public.zip",
165
- data_dir="cmnli_public",
166
- ),
167
- ClueConfig(
168
- name="cluewsc2020",
169
- description=textwrap.dedent(
170
- """\
171
- CLUE Winograd Scheme Challenge (CLUEWSC 2020) is a Chinese WSC dataset.
172
- The text is from contemporary literature and annotated by human experts.
173
- The task is to determine which noun the pronoun in the sentence refers to.
174
- The question appears in the form of true and false discrimination.
175
- """
176
- ),
177
- text_features={"text": "text", "target": "target"},
178
- label_classes=["true", "false"],
179
- label_column="label",
180
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/cluewsc2020_public.zip",
181
- ),
182
- ClueConfig(
183
- name="csl",
184
- description=textwrap.dedent(
185
- """\
186
- Chinese Scientific Literature Dataset (CSL) is taken from the abstracts of
187
- Chinese papers and their keywords. The papers are selected from some core
188
- journals of Chinese social sciences and natural sciences. TF-IDF is used to
189
- generate a mixture of fake keywords and real keywords in the paper to construct
190
- abstract-keyword pairs. The task goal is to judge whether the keywords are
191
- all real keywords based on the abstract.
192
- """
193
- ),
194
- text_features={"abst": "abst", "keyword": "keyword", "corpus_id": "id"},
195
- label_classes=["0", "1"],
196
- label_column="label",
197
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/csl_public.zip",
198
- url="https://github.com/P01son6415/CSL",
199
- ),
200
- ClueConfig(
201
- name="cmrc2018",
202
- description=textwrap.dedent(
203
- """\
204
- CMRC2018 is the first Chinese Span-Extraction Machine Reading Comprehension
205
- Dataset. The task requires to set up a system that reads context,
206
- question and extract the answer from the context (the answer is a continuous
207
- span in the context).
208
- """
209
- ),
210
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/cmrc2018_public.zip",
211
- url="https://hfl-rc.github.io/cmrc2018/",
212
- citation=textwrap.dedent(
213
- """\
214
- @article{cmrc2018-dataset,
215
- title={A Span-Extraction Dataset for Chinese Machine Reading Comprehension},
216
- author={Cui, Yiming and Liu, Ting and Xiao, Li and Chen, Zhipeng and Ma, Wentao and Che, Wanxiang and Wang, Shijin and Hu, Guoping},
217
- journal={arXiv preprint arXiv:1810.07366},
218
- year={2018}
219
- }"""
220
- ),
221
- ),
222
- ClueConfig(
223
- name="drcd",
224
- description=textwrap.dedent(
225
- """\
226
- Delta Reading Comprehension Dataset (DRCD) belongs to the general field of traditional
227
- Chinese machine reading comprehension data set. This data set is expected to become a
228
- standard Chinese reading comprehension data set suitable for transfer learning.
229
- """
230
- ),
231
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/drcd_public.zip",
232
- url="https://github.com/DRCKnowledgeTeam/DRCD",
233
- ),
234
- ClueConfig(
235
- name="chid",
236
- description=textwrap.dedent(
237
- """\
238
- Chinese IDiom Dataset for Cloze Test (CHID) contains many masked idioms in the text.
239
- The candidates contain similar idioms to the real ones.
240
- """
241
- ),
242
- text_features={"candidates": "candidates", "content": "content"},
243
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/chid_public.zip",
244
- url="https://arxiv.org/abs/1906.01265",
245
- citation=textwrap.dedent(
246
- """\
247
- @article{Zheng_2019,
248
- title={ChID: A Large-scale Chinese IDiom Dataset for Cloze Test},
249
- url={http://dx.doi.org/10.18653/v1/P19-1075},
250
- DOI={10.18653/v1/p19-1075},
251
- journal={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
252
- publisher={Association for Computational Linguistics},
253
- author={Zheng, Chujie and Huang, Minlie and Sun, Aixin},
254
- year={2019}
255
- }"""
256
- ),
257
- ),
258
- ClueConfig(
259
- name="c3",
260
- description=textwrap.dedent(
261
- """\
262
- Multiple-Choice Chinese Machine Reading Comprehension (C3, or C^3) is a Chinese
263
- multi-choice reading comprehension data set, including mixed type data sets
264
- such as dialogue and long text. Both the training and validation sets are
265
- the concatenation of the dialogue and long-text subsets.
266
- """
267
- ),
268
- text_features={"candidates": "candidates", "content": "content"},
269
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/c3_public.zip",
270
- url="https://arxiv.org/abs/1904.09679",
271
- citation=textwrap.dedent(
272
- """\
273
- @article{sun2020investigating,
274
- author = {Kai Sun and
275
- Dian Yu and
276
- Dong Yu and
277
- Claire Cardie},
278
- title = {Investigating Prior Knowledge for Challenging Chinese Machine Reading
279
- Comprehension},
280
- journal = {Trans. Assoc. Comput. Linguistics},
281
- volume = {8},
282
- pages = {141--155},
283
- year = {2020},
284
- url = {https://transacl.org/ojs/index.php/tacl/article/view/1882}
285
- }"""
286
- ),
287
- ),
288
- ClueConfig(
289
- name="ocnli",
290
- description=textwrap.dedent(
291
- """\
292
- OCNLI stands for Original Chinese Natural Language Inference. It is a corpus for
293
- Chinese Natural Language Inference, collected following closely the procedures of MNLI,
294
- but with enhanced strategies aiming for more challenging inference pairs. We want to
295
- emphasize we did not use human/machine translation in creating the dataset, and thus
296
- our Chinese texts are original and not translated.
297
- """
298
- ),
299
- text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
300
- label_classes=["neutral", "entailment", "contradiction"],
301
- label_column="label",
302
- # From: https://github.com/CLUEbenchmark/OCNLI/archive/02d55cb3c7dc984682677b8dd81db6a1e4710720.zip
303
- data_url={
304
- "test": "https://raw.githubusercontent.com/CLUEbenchmark/OCNLI/02d55cb3c7dc984682677b8dd81db6a1e4710720/data/ocnli/test.json",
305
- "train": "https://raw.githubusercontent.com/CLUEbenchmark/OCNLI/02d55cb3c7dc984682677b8dd81db6a1e4710720/data/ocnli/train.json",
306
- "validation": "https://raw.githubusercontent.com/CLUEbenchmark/OCNLI/02d55cb3c7dc984682677b8dd81db6a1e4710720/data/ocnli/dev.json",
307
- },
308
- url="https://arxiv.org/abs/2010.05444",
309
- citation=textwrap.dedent(
310
- """\
311
- @inproceedings{ocnli,
312
- title={OCNLI: Original Chinese Natural Language Inference},
313
- author={Hai Hu and Kyle Richardson and Liang Xu and Lu Li and Sandra Kuebler and Larry Moss},
314
- booktitle={Findings of EMNLP},
315
- year={2020},
316
- url={https://arxiv.org/abs/2010.05444}
317
- }"""
318
- ),
319
- ),
320
- ClueConfig(
321
- name="diagnostics",
322
- description=textwrap.dedent(
323
- """\
324
- Diagnostic set, used to evaluate the performance of different models on 9 Chinese language
325
- phenomena summarized by linguists.
326
-
327
- Use the model trained on CMNLI to directly predict the result on this diagnostic set.
328
- """
329
- ),
330
- text_features={"sentence1": "premise", "sentence2": "hypothesis"},
331
- label_classes=["neutral", "entailment", "contradiction"],
332
- label_column="label",
333
- data_url="https://storage.googleapis.com/cluebenchmark/tasks/clue_diagnostics_public.zip",
334
- ),
335
- ]
336
-
337
- def _info(self):
338
- if self.config.name in ["afqmc", "tnews", "iflytek", "cmnli", "diagnostics", "ocnli"]:
339
- features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
340
- if self.config.label_classes:
341
- features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
342
- else:
343
- features["label"] = datasets.Value("float32")
344
- features["idx"] = datasets.Value("int32")
345
- elif self.config.name == "cluewsc2020":
346
- features = {
347
- "idx": datasets.Value("int32"),
348
- "text": datasets.Value("string"),
349
- "label": datasets.features.ClassLabel(names=self.config.label_classes),
350
- "target": {
351
- "span1_text": datasets.Value("string"),
352
- "span2_text": datasets.Value("string"),
353
- "span1_index": datasets.Value("int32"),
354
- "span2_index": datasets.Value("int32"),
355
- },
356
- }
357
- elif self.config.name == "csl":
358
- features = {
359
- "idx": datasets.Value("int32"),
360
- "corpus_id": datasets.Value("int32"),
361
- "abst": datasets.Value("string"),
362
- "label": datasets.features.ClassLabel(names=self.config.label_classes),
363
- "keyword": datasets.Sequence(datasets.Value("string")),
364
- }
365
- elif self.config.name in ["cmrc2018", "drcd"]:
366
- features = {
367
- "id": datasets.Value("string"),
368
- "context": datasets.Value("string"),
369
- "question": datasets.Value("string"),
370
- "answers": datasets.Sequence(
371
- {
372
- "text": datasets.Value("string"),
373
- "answer_start": datasets.Value("int32"),
374
- }
375
- ),
376
- }
377
- elif self.config.name == "chid":
378
- features = {
379
- "idx": datasets.Value("int32"),
380
- "candidates": datasets.Sequence(datasets.Value("string")),
381
- "content": datasets.Sequence(datasets.Value("string")),
382
- "answers": datasets.features.Sequence(
383
- {
384
- "text": datasets.Value("string"),
385
- "candidate_id": datasets.Value("int32"),
386
- }
387
- ),
388
- }
389
- elif self.config.name == "c3":
390
- features = {
391
- "id": datasets.Value("int32"),
392
- "context": datasets.Sequence(datasets.Value("string")),
393
- "question": datasets.Value("string"),
394
- "choice": datasets.Sequence(datasets.Value("string")),
395
- "answer": datasets.Value("string"),
396
- }
397
- else:
398
- raise NotImplementedError(
399
- "This task is not implemented. If you believe"
400
- " this task was recently added to the CLUE benchmark, "
401
- "please open a GitHub issue and we will add it."
402
- )
403
-
404
- return datasets.DatasetInfo(
405
- description=_CLUE_DESCRIPTION,
406
- features=datasets.Features(features),
407
- homepage=self.config.url,
408
- citation=self.config.citation + "\n" + _CLUE_CITATION,
409
- )
410
-
411
- def _split_generators(self, dl_manager):
412
- if self.config.name == "ocnli":
413
- data_dir = dl_manager.download_and_extract(self.config.data_url)
414
- return [
415
- datasets.SplitGenerator(
416
- name=split,
417
- gen_kwargs={
418
- "data_file": data_dir[split],
419
- "split": split,
420
- },
421
- )
422
- for split in [datasets.Split.TEST, datasets.Split.TRAIN, datasets.Split.VALIDATION]
423
- ]
424
- dl_dir = dl_manager.download_and_extract(self.config.data_url)
425
- data_dir = os.path.join(dl_dir, self.config.data_dir)
426
-
427
- if self.config.name in {"chid", "c3"}:
428
- test_file = "test1.1.json"
429
- elif self.config.name == "diagnostics":
430
- test_file = "diagnostics_test.json"
431
- else:
432
- test_file = "test.json"
433
-
434
- test_split = datasets.SplitGenerator(
435
- name=datasets.Split.TEST,
436
- gen_kwargs={
437
- "data_file": os.path.join(data_dir, test_file),
438
- "split": "test",
439
- },
440
- )
441
-
442
- split_list = [test_split]
443
-
444
- if self.config.name != "diagnostics":
445
- train_split = datasets.SplitGenerator(
446
- name=datasets.Split.TRAIN,
447
- gen_kwargs={
448
- "data_file": os.path.join(
449
- data_dir or "", "train.json" if self.config.name != "c3" else "d-train.json"
450
- ),
451
- "split": "train",
452
- },
453
- )
454
- val_split = datasets.SplitGenerator(
455
- name=datasets.Split.VALIDATION,
456
- gen_kwargs={
457
- "data_file": os.path.join(
458
- data_dir or "", "dev.json" if self.config.name != "c3" else "d-dev.json"
459
- ),
460
- "split": "dev",
461
- },
462
- )
463
- split_list += [train_split, val_split]
464
-
465
- if self.config.name == "cmrc2018":
466
- split_list.append(
467
- datasets.SplitGenerator(
468
- name=datasets.Split("trial"),
469
- gen_kwargs={
470
- "data_file": os.path.join(data_dir or "", "trial.json"),
471
- "split": "trial",
472
- },
473
- )
474
- )
475
-
476
- return split_list
477
-
478
- def _generate_examples(self, data_file, split):
479
- process_label = self.config.process_label
480
- label_classes = self.config.label_classes
481
-
482
- if self.config.name == "chid" and split != "test":
483
- answer_file = os.path.join(os.path.dirname(data_file), f"{split}_answer.json")
484
- answer_dict = json.load(open(answer_file, encoding="utf8"))
485
-
486
- if self.config.name == "c3":
487
- if split == "test":
488
- files = [data_file]
489
- else:
490
- data_dir = os.path.dirname(data_file)
491
- files = [os.path.join(data_dir, f"{typ}-{split}.json") for typ in ["d", "m"]]
492
- data = []
493
- for f in files:
494
- data_subset = json.load(open(f, encoding="utf8"))
495
- data += data_subset
496
- for idx, entry in enumerate(data):
497
- for qidx, question in enumerate(entry[1]):
498
- example = {
499
- "id": idx if split != "test" else int(question["id"]),
500
- "context": entry[0],
501
- "question": question["question"],
502
- "choice": question["choice"],
503
- "answer": question["answer"] if split != "test" else "",
504
- }
505
- yield f"{idx}_{qidx}", example
506
-
507
- else:
508
- with open(data_file, encoding="utf8") as f:
509
- if self.config.name in ["cmrc2018", "drcd"]:
510
- data = json.load(f)
511
- for example in data["data"]:
512
- for paragraph in example["paragraphs"]:
513
- context = paragraph["context"].strip()
514
- for qa in paragraph["qas"]:
515
- question = qa["question"].strip()
516
- id_ = qa["id"]
517
-
518
- answer_starts = [answer["answer_start"] for answer in qa["answers"]]
519
- answers = [answer["text"].strip() for answer in qa["answers"]]
520
-
521
- yield id_, {
522
- "context": context,
523
- "question": question,
524
- "id": id_,
525
- "answers": {
526
- "answer_start": answer_starts,
527
- "text": answers,
528
- },
529
- }
530
-
531
- else:
532
- for n, line in enumerate(f):
533
- row = json.loads(line)
534
- example = {feat: row[col] for feat, col in self.config.text_features.items()}
535
- example["idx"] = n if self.config.name != "diagnostics" else int(row["index"])
536
- if self.config.name == "chid": # CHID has a separate gold label file
537
- contents = example["content"]
538
- candidates = example["candidates"]
539
- idiom_list = []
540
- if split != "test":
541
- for content in contents:
542
- idioms = re.findall(r"#idiom\d+#", content)
543
- for idiom in idioms:
544
- idiom_list.append(
545
- {
546
- "candidate_id": answer_dict[idiom],
547
- "text": candidates[answer_dict[idiom]],
548
- }
549
- )
550
- example["answers"] = idiom_list
551
-
552
- elif self.config.label_column in row:
553
- label = row[self.config.label_column]
554
- # Notice: some labels in CMNLI and OCNLI are invalid. We drop these data.
555
- if self.config.name in ["cmnli", "ocnli"] and label == "-":
556
- continue
557
- # For some tasks, the label is represented as 0 and 1 in the tsv
558
- # files and needs to be cast to integer to work with the feature.
559
- if label_classes and label not in label_classes:
560
- label = int(label) if label else None
561
- example["label"] = process_label(label)
562
- else:
563
- example["label"] = process_label(-1)
564
-
565
- # Filter out corrupted rows.
566
- for value in example.values():
567
- if value is None:
568
- break
569
- else:
570
- yield example["idx"], example
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cluewsc2020/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04723ac33cb7a4d5498c706617941eba15ec06764e9b8130b4b4a5e27a7b7b65
3
+ size 270556
cluewsc2020/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7b52deaee29d4aee07c27897cb592de6cbabaf8e2eed492a1538f44aa38b1f1
3
+ size 84085
cluewsc2020/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b3877557c3007a54caf54e50eb3af57908f0d615dad31d9617c824e6c925e0a
3
+ size 25970
cmnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f05c4e5ba132a5ea4ce23fcc9cca19ed1ad2b7a38331c04e410adeea394bd7d6
3
+ size 1720208
cmnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c2c0d134eaaae593d710d07ad8e6b78cb6fd5ec605f609eda8ae8ef0d04e588a
3
+ size 51159964
cmnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e3c2cdb3f52da28117c24b5a15e261fad6be039cbac715dd2bed0e2ba70e8a7
3
+ size 1354747
cmrc2018/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75eee895cb4420f2fb7f48905b4b503069617e2896b6e50f54bfe5078233e7c5
3
+ size 562527
cmrc2018/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50620fe06f98895de2d6d7b391c979683b68389690653c98fe8c63c0df17a3dc
3
+ size 3365760
cmrc2018/trial-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8915dfa764dc837733b1642375646b125af68500d72156494095e7ee02ee573
3
+ size 394653
cmrc2018/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa0c2e73b896ef311f87f1a5e665a3d55724c6912d74f6727f50609ff13f8aef
3
+ size 1136061
csl/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4edc3eb600c6634b52af583d197f7e41a9e1fa372d58af167d16c4f0bad8c581
3
+ size 888913
csl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfe8aee2b8ee4bf1c6789931705e475891de1a2e0c7d8712a1d5d54931e52d3a
3
+ size 2650129
csl/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35b716c8f1c4402f301397161ac3b8f665cddc41531f803d64f8c70cf117bcc2
3
+ size 397069
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"afqmc": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://dc.cloud.alipay.com/index#/topic/data?id=8", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "afqmc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 378726, "num_examples": 3861, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 3396535, "num_examples": 34334, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 426293, "num_examples": 4316, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/afqmc_public.zip": {"num_bytes": 1195044, "checksum": "5a4cb1556b833010c329fa2ad2207d9e98fc94071b7e474015e9dd7c385db4dc"}}, "download_size": 1195044, "post_processing_size": null, "dataset_size": 4201554, "size_in_bytes": 5396598}, "tnews": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/skdjfla/toutiao-text-classfication-dataset", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 15, "names": ["100", "101", "102", "103", "104", "106", "107", "108", "109", "110", "112", "113", "114", "115", "116"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "tnews", "version": 
{"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 810974, "num_examples": 10000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 4245701, "num_examples": 53360, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 797926, "num_examples": 10000, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/tnews_public.zip": {"num_bytes": 5123575, "checksum": "77c476e70cfe0b014a81b84c6e1db2142a8a2f52f4ae0a8216aa75e673933462"}}, "download_size": 5123575, "post_processing_size": null, "dataset_size": 5854601, "size_in_bytes": 10978176}, "iflytek": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 119, "names": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100", "101", "102", "103", "104", "105", "106", "107", "108", "109", "110", "111", "112", "113", "114", "115", "116", "117", "118"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "iflytek", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2105688, "num_examples": 2600, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 10028613, "num_examples": 12133, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2157123, "num_examples": 2599, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/iflytek_public.zip": {"num_bytes": 6505938, "checksum": "c59b961b29f1d0bad0c5e01aa62e4a61a80e9cfb980ce89b06c000851fbb3b06"}}, "download_size": 6505938, "post_processing_size": null, "dataset_size": 14291424, "size_in_bytes": 20797362}, "cmnli": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of 
resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["neutral", "entailment", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "cmnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2386837, "num_examples": 13880, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 67685309, "num_examples": 391783, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2051845, "num_examples": 12241, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/cmnli_public.zip": {"num_bytes": 31404066, "checksum": "3a3f3b1d3d27134cf11e585156f07fa050bd0a0836821c02696af0dbaa14513b"}}, "download_size": 31404066, "post_processing_size": null, "dataset_size": 72123991, "size_in_bytes": 103528057}, "cluewsc2020": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["true", "false"], "names_file": null, "id": null, "_type": "ClassLabel"}, "target": {"span1_text": {"dtype": "string", "id": null, "_type": "Value"}, "span2_text": {"dtype": "string", "id": null, "_type": "Value"}, "span1_index": {"dtype": "int32", "id": null, "_type": "Value"}, "span2_index": {"dtype": "int32", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "cluewsc2020", "version": {"version_str": "1.0.0", 
"description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 645649, "num_examples": 2574, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 288828, "num_examples": 1244, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 72682, "num_examples": 304, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/cluewsc2020_public.zip": {"num_bytes": 281384, "checksum": "ebe5f357b19392d601be401b2fd4246983f534809a078eab7bda1de418fb3ffe"}}, "download_size": 281384, "post_processing_size": null, "dataset_size": 1007159, "size_in_bytes": 1288543}, "csl": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/P01son6415/CSL", "license": "", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "corpus_id": {"dtype": "int32", "id": null, "_type": "Value"}, "abst": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["0", "1"], "names_file": null, "id": null, "_type": "ClassLabel"}, "keyword": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "csl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2463740, "num_examples": 3000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 16478914, "num_examples": 20000, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2464575, "num_examples": 3000, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/csl_public.zip": {"num_bytes": 3234594, "checksum": "795d1a2e475d59acad8236f6c5baba7a0b43d3e0508cb60f15ffbc76d5f437c4"}}, "download_size": 3234594, "post_processing_size": null, "dataset_size": 21407229, "size_in_bytes": 24641823}, "cmrc2018": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": " @article{cmrc2018-dataset,\n title={A Span-Extraction Dataset for Chinese Machine Reading Comprehension},\n author={Cui, Yiming and Liu, Ting and Xiao, Li and Chen, Zhipeng and Ma, Wentao and Che, Wanxiang and Wang, Shijin and Hu, Guoping},\n journal={arXiv preprint arXiv:1810.07366},\n year={2018}\n}\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and 
Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://hfl-rc.github.io/cmrc2018/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "cmrc2018", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3112066, "num_examples": 2000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 15508110, "num_examples": 10142, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 5183809, "num_examples": 3219, "dataset_name": "clue"}, "trial": {"name": "trial", "num_bytes": 1606931, "num_examples": 1002, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/cmrc2018_public.zip": {"num_bytes": 3405146, "checksum": "6c63dc27e728ec5231aeb7d2861b4c90b6c116390582e0c44416cf3edf030b16"}}, "download_size": 3405146, "post_processing_size": null, "dataset_size": 25410916, "size_in_bytes": 28816062}, "drcd": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/DRCKnowledgeTeam/DRCD", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "drcd", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4982402, "num_examples": 3493, 
"dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 37443458, "num_examples": 26936, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 5222753, "num_examples": 3524, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/drcd_public.zip": {"num_bytes": 7264200, "checksum": "f03a38bded37572e224b69b822794eca6218f9584afc0918bf8aa2bc77cf968d"}}, "download_size": 7264200, "post_processing_size": null, "dataset_size": 47648613, "size_in_bytes": 54912813}, "chid": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": " @article{Zheng_2019,\n title={ChID: A Large-scale Chinese IDiom Dataset for Cloze Test},\n url={http://dx.doi.org/10.18653/v1/P19-1075},\n DOI={10.18653/v1/p19-1075},\n journal={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},\n publisher={Association for Computational Linguistics},\n author={Zheng, Chujie and Huang, Minlie and Sun, Aixin},\n year={2019}\n}\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://arxiv.org/abs/1906.01265", "license": "", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "candidates": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "content": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "candidate_id": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "chid", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11480463, "num_examples": 3447, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 252478178, "num_examples": 84709, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 10117789, "num_examples": 3218, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/chid_public.zip": {"num_bytes": 139199202, "checksum": "be02dbcc41a9c237542141f53836e70a209f7a686cf894891411c7e5534f73a4"}}, "download_size": 139199202, "post_processing_size": null, "dataset_size": 274076430, "size_in_bytes": 413275632}, "c3": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "@article{sun2020investigating,\n author = {Kai Sun and\n Dian Yu 
and\n Dong Yu and\n Claire Cardie},\n title = {Investigating Prior Knowledge for Challenging Chinese Machine Reading\n Comprehension},\n journal = {Trans. Assoc. Comput. Linguistics},\n volume = {8},\n pages = {141--155},\n year = {2020},\n url = {https://transacl.org/ojs/index.php/tacl/article/view/1882}\n }\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://arxiv.org/abs/1904.09679", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "context": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "choice": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "c3", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1600166, "num_examples": 1625, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 9672787, "num_examples": 11869, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 2990967, "num_examples": 3816, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/c3_public.zip": {"num_bytes": 3495930, "checksum": "20dfa683c57c9795129e22d8dd0c299d6b2a29fbc84758ef2ebdb8b2904d7e12"}}, "download_size": 3495930, "post_processing_size": null, "dataset_size": 14263920, "size_in_bytes": 17759850}, "ocnli": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": " @inproceedings{ocnli,\n title={OCNLI: Original Chinese Natural Language Inference},\n author={Hai Hu and Kyle Richardson and Liang Xu and Lu Li and Sandra Kuebler and Larry Moss},\n booktitle={Findings of EMNLP},\n year={2020},\n url={https://arxiv.org/abs/2010.05444}\n}\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://arxiv.org/abs/2010.05444", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, 
"_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["neutral", "entailment", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "ocnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 376066, "num_examples": 3000, "dataset_name": "clue"}, "train": {"name": "train", "num_bytes": 6187190, "num_examples": 50437, "dataset_name": "clue"}, "validation": {"name": "validation", "num_bytes": 366235, "num_examples": 2950, "dataset_name": "clue"}}, "download_checksums": {"https://github.com/CLUEbenchmark/OCNLI/archive/02d55cb3c7dc984682677b8dd81db6a1e4710720.zip": {"num_bytes": 4359754, "checksum": "93abd5dbe417101d4c7526920fae59cc67969936c1868b1559fa4802839c49d0"}}, "download_size": 4359754, "post_processing_size": null, "dataset_size": 6929491, "size_in_bytes": 11289245}, "diagnostics": {"description": "CLUE, A Chinese Language Understanding Evaluation Benchmark\n(https://www.cluebenchmarks.com/) is a collection of resources for training,\nevaluating, and analyzing Chinese language understanding systems.\n\n", "citation": "\n@misc{xu2020clue,\n title={CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author={Liang Xu and Xuanwei Zhang and Lu Li and Hai Hu and Chenjie Cao and Weitang Liu and Junyi Li and Yudong Li and Kai Sun and Yechen Xu and Yiming Cui and Cong Yu and Qianqian Dong and Yin Tian and Dian Yu and Bo Shi and Jun Zeng and Rongzhao Wang and Weijian Xie and Yanting Li and Yina Patterson and Zuoyu Tian and Yiwen Zhang and He Zhou and Shaoweihua Liu and Qipeng Zhao and Cong Yue and Xinrui Zhang and Zhengliang Yang and Zhenzhong Lan},\n year={2020},\n eprint={2004.05986},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["neutral", "entailment", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "clue", "config_name": "diagnostics", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 42400, "num_examples": 514, "dataset_name": "clue"}}, "download_checksums": {"https://storage.googleapis.com/cluebenchmark/tasks/clue_diagnostics_public.zip": {"num_bytes": 12062, "checksum": "56b52e70c195686557a966c6064c9bdc4dece1de8c89551f89ad046637e9a7c4"}}, "download_size": 12062, "post_processing_size": null, "dataset_size": 42400, "size_in_bytes": 54462}}
 
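The legacy metadata above spells out each config's feature schema (for example, cmrc2018 exposes `id`, `context`, `question`, and a sequence of `answers` with `text`/`answer_start` fields). As a minimal sketch of how that schema surfaces to users, assuming the `datasets` library is installed and that the Hub id `clue` resolves to this repository:

```python
# Minimal sketch (not part of this commit): load one CLUE config and inspect
# the fields declared in the legacy metadata above. Assumes `datasets` is
# installed and that "clue" resolves to this dataset repository.
from datasets import load_dataset

cmrc = load_dataset("clue", "cmrc2018")
print(cmrc)  # expected splits per the metadata: train / validation / test / trial

example = cmrc["train"][0]
print(example["question"])                 # plain string feature
print(example["answers"]["text"])          # sequence feature: list of answer strings
print(example["answers"]["answer_start"])  # parallel list of start offsets (int32)
```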
diagnostics/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3f141fd315bf6814e98d3f780db81af967c1027174f92dddc955aecbf04f80d
+ size 23000
drcd/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c27f0bf47c1c944d591528040800a682f359c056ef8a6354c8ca7f5e73c0b2cd
+ size 1104239
drcd/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f9172b84bb3f30b3de781311bb2cdaf225d62543a643a8b5f0cbe20768135f8
+ size 8858709
drcd/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f40e27ceaec7622bff54aa9631281fe06fa8fbdf836f7c9be4766e8f1035dadc
+ size 1225927
iflytek/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00631f11f7120e6ee32a8510ab68641ab1f5eb4a38fc1324f7624799b2a8cd6f
+ size 1435417
iflytek/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c67f671203be5125eb6fddd7640a94e7eabf66060e4c016757165b8084121cb
+ size 6865782
iflytek/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a75d3ea0f17014c821d2b6f0d4dede26060fe3ffb74437c5a535a8c0533abf75
+ size 1476656
ocnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0109a91a89e8066a58826c64423ebd8ddf9890498b458ba89fe7b5b57e743808
+ size 289893
ocnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2aefe2b8c21d144b83b07b04fce19152aec5ecb3fa1af1aaf4653d80210f2f0a
+ size 2440405
ocnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:685ff743e9e5faa6798bf3d46c4219c4032b8925e517f56c6350dd6bd8cec84d
+ size 269920
tnews/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe5b615ad25a0dbf4eea2b47f2ed8f8274fde88bccbb4bd7a5170db84d3d0059
+ size 654633
tnews/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57ad8b1a5a6a4120a283468f5a828a2702b742139a1878e1edcd7b8b59eec5de
+ size 3399930
tnews/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db0068274df1310edd3603003b8a51486dadb58a67e549304e8156f7dcc31aec
+ size 643280
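
Each Parquet shard added in this commit is stored through Git LFS, so the diff only shows the pointer file (`version`, `oid`, `size`); the columnar data itself lives in LFS storage. As a hedged sketch of inspecting a single shard directly, assuming `huggingface_hub` and `pyarrow` are available and that the dataset repo id is `clue`:

```python
# Hedged sketch: download one Parquet shard from this dataset repo and read it
# with pyarrow. The repo id "clue" and repo_type="dataset" are assumptions
# based on this repository; adjust them if the dataset lives elsewhere.
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="clue",
    repo_type="dataset",
    filename="tnews/train-00000-of-00001.parquet",
)
table = pq.read_table(path)
print(table.num_rows)  # should line up with the split sizes in the README metadata
print(table.schema)    # tnews columns, e.g. sentence / label, per the README metadata
```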