Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
found
Source Datasets:
extended|glue
Tags:
License:
MtCelesteMa commited on
Commit
0e3d547
1 Parent(s): 3aa706f

Upload Dataset

Browse files
Files changed (5) hide show
  1. .gitattributes +2 -0
  2. multiglue.py +46 -0
  3. test.json +3 -0
  4. train.json +3 -0
  5. validation.json +0 -0
.gitattributes CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
55
+ test.json filter=lfs diff=lfs merge=lfs -text
56
+ train.json filter=lfs diff=lfs merge=lfs -text
multiglue.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MultiGLUE
2
+ # A dataset consisting of all binary classification GLUE tasks
3
+
4
+ import os
5
+ import json
6
+
7
+ import datasets
8
+
9
+
10
class MultiGLUE(datasets.GeneratorBasedBuilder):
    """Dataset builder for MultiGLUE: all binary-classification GLUE tasks merged.

    Each example is a dict with a task identifier, a sentence pair, and a
    binary label. Data is stored as JSON-lines files (one JSON object per
    line) named train.json / validation.json / test.json in the repo root.
    """

    def _info(self):
        """Return dataset metadata.

        Features:
            task: name of the originating GLUE task (string).
            sentence1 / sentence2: the input sentence pair (strings;
                sentence2 may be empty for single-sentence tasks —
                presumably, verify against the data files).
            label: binary class label (0 or 1).
        """
        return datasets.DatasetInfo(
            description='A dataset consisting of all binary classification GLUE tasks',
            features=datasets.Features({
                'task': datasets.Value('string'),
                'sentence1': datasets.Value('string'),
                'sentence2': datasets.Value('string'),
                'label': datasets.ClassLabel(num_classes=2)
            })
        )

    def _split_generators(self, dl_manager):
        """Resolve the three split files and map them onto canonical splits.

        Args:
            dl_manager: datasets.DownloadManager used to fetch/cache the
                repo-relative JSON-lines files (they are git-lfs objects).

        Returns:
            A list of SplitGenerators for TRAIN, VALIDATION, and TEST, each
            passing the resolved local path to ``_generate_examples``.
        """
        data_files = dl_manager.download_and_extract({
            'train': 'train.json',
            'validation': 'validation.json',
            'test': 'test.json'
        })
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepath': data_files['train']}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepath': data_files['validation']}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'filepath': data_files['test']}
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a JSON-lines split file.

        Args:
            filepath: local path to a JSON-lines file; every line must be a
                complete JSON object matching the features in ``_info``.

        Yields:
            Tuples of (zero-based line index, decoded example dict).
        """
        # JSON is UTF-8 by specification (RFC 8259); pin the encoding so
        # decoding does not depend on the platform's locale default
        # (e.g. cp1252 on Windows would corrupt or reject non-ASCII text).
        with open(filepath, encoding='utf-8') as f:
            for key, row in enumerate(f):
                yield key, json.loads(row)
test.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1156a0ae8334661d7d2cb74438c8a19fb0e3f93d81f5510d9ec78933721a9e35
3
+ size 73078477
train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fbdbb7db8d01a35c8b797efeb6d469aa257faca2365c010316dab6fdde98589
3
+ size 103823019
validation.json ADDED
The diff for this file is too large to render. See raw diff