albertvillanova HF staff committed on
Commit
26a2ff7
1 Parent(s): c302a00

Add re data files

README.md CHANGED

@@ -221,13 +221,13 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 11145538
+    num_bytes: 11145426
     num_examples: 32470
   - name: validation
-    num_bytes: 2559300
+    num_bytes: 2559272
     num_examples: 7765
-  download_size: 5669259
-  dataset_size: 13704838
+  download_size: 8190257
+  dataset_size: 13704698
 - config_name: sts
   features:
   - name: guid
@@ -325,6 +325,12 @@ configs:
     path: nli/train-*
   - split: validation
     path: nli/validation-*
+- config_name: re
+  data_files:
+  - split: train
+    path: re/train-*
+  - split: validation
+    path: re/validation-*
 - config_name: sts
   data_files:
   - split: train
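With the new `re` entry under `configs` and the Parquet shards added by this commit, the config should be loadable straight from the Hub. A minimal sketch; the repository id `klue/klue` is an assumption, substitute the id of the repo this commit belongs to if it differs:

```python
from datasets import load_dataset

# Minimal sketch: load the newly added "re" (relation extraction) config.
# The repository id "klue/klue" is an assumption; adjust if the dataset
# card above lives under a different id.
ds = load_dataset("klue/klue", "re")

print(ds["train"].num_rows)       # 32470 examples per the card metadata above
print(ds["validation"].num_rows)  # 7765 examples per the card metadata above
```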
dataset_infos.json CHANGED

@@ -264,60 +264,49 @@
     "features": {
       "guid": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "subject_entity": {
         "word": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "start_idx": {
           "dtype": "int32",
-          "id": null,
           "_type": "Value"
         },
         "end_idx": {
           "dtype": "int32",
-          "id": null,
           "_type": "Value"
         },
         "type": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         }
       },
       "object_entity": {
         "word": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
         "start_idx": {
           "dtype": "int32",
-          "id": null,
           "_type": "Value"
         },
         "end_idx": {
           "dtype": "int32",
-          "id": null,
           "_type": "Value"
         },
         "type": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         }
       },
       "label": {
-        "num_classes": 30,
         "names": [
           "no_relation",
           "org:dissolved",
@@ -350,20 +339,15 @@
           "per:religion",
           "per:title"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "source": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "klue",
+    "dataset_name": "klue",
     "config_name": "re",
     "version": {
       "version_str": "1.0.0",
@@ -375,27 +359,20 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes": 11145538,
+        "num_bytes": 11145426,
         "num_examples": 32470,
-        "dataset_name": "klue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 2559300,
+        "num_bytes": 2559272,
         "num_examples": 7765,
-        "dataset_name": "klue"
-      }
-    },
-    "download_checksums": {
-      "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000070/data/klue-re-v1.tar.gz": {
-        "num_bytes": 5669259,
-        "checksum": "b09ceac0d986cc09e42fcda9c7f2873c0eea8ec0629baf91fead36580790f8f5"
+        "dataset_name": null
       }
     },
-    "download_size": 5669259,
-    "post_processing_size": null,
-    "dataset_size": 13704838,
-    "size_in_bytes": 19374097
+    "download_size": 8190257,
+    "dataset_size": 13704698,
+    "size_in_bytes": 21894955
   },
   "dp": {
     "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
re/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:329e15f482c794900d53627730448945607f2edb12397811eb6f671bf44d57da
+size 6647272
re/validation-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10aade1fdb0244eb867eb8a7e3459d391546016c792989eb1030e23603a28531
+size 1542985
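The two files above are Git LFS pointer stubs, not the Parquet data itself. Once the repository is cloned and the LFS objects fetched (e.g. with `git lfs pull`), the shards can be inspected directly; a small sanity-check sketch under that assumption:

```python
import pyarrow.parquet as pq

# Quick check on the uploaded shards. Assumes the repo has been cloned and
# `git lfs pull` has been run, so these paths point at real Parquet files
# rather than the pointer stubs shown above.
train = pq.read_table("re/train-00000-of-00001.parquet")
validation = pq.read_table("re/validation-00000-of-00001.parquet")

print(train.num_rows, validation.num_rows)  # expected: 32470 and 7765
print(train.schema)
```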