albertvillanova (HF staff) committed
Commit 85520db
1 Parent(s): a17903a

Add mrpc data files

README.md CHANGED
@@ -182,17 +182,17 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 443498
-    num_examples: 1725
   - name: train
-    num_bytes: 946146
+    num_bytes: 943843
     num_examples: 3668
   - name: validation
-    num_bytes: 106142
+    num_bytes: 105879
     num_examples: 408
-  download_size: 1494541
-  dataset_size: 1495786
+  - name: test
+    num_bytes: 442410
+    num_examples: 1725
+  download_size: 1033400
+  dataset_size: 1492132
 - config_name: qnli
   features:
   - name: question
@@ -352,6 +352,14 @@ configs:
     path: cola/validation-*
   - split: test
     path: cola/test-*
+- config_name: mrpc
+  data_files:
+  - split: train
+    path: mrpc/train-*
+  - split: validation
+    path: mrpc/validation-*
+  - split: test
+    path: mrpc/test-*
 - config_name: sst2
   data_files:
   - split: train
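With the `configs` mapping above pointing each mrpc split at its parquet shard, the config can be loaded straight from the repository's data files rather than through the legacy download script. A minimal sketch, assuming the repository id is the canonical `glue` dataset on the Hub and a recent `datasets` release:

```python
from datasets import load_dataset

# Load the mrpc config directly from the repository's parquet data files.
# "glue" is the assumed repository id; prefix with the owning namespace if needed.
mrpc = load_dataset("glue", "mrpc")

print(mrpc)              # DatasetDict with train/validation/test splits
print(mrpc["train"][0])  # sentence1, sentence2, label, idx fields
```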
dataset_infos.json CHANGED
@@ -113,39 +113,32 @@
     },
     "mrpc": {
         "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-        "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+        "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
         "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398",
         "license": "",
         "features": {
             "sentence1": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
             },
             "sentence2": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
             },
             "label": {
-                "num_classes": 2,
                 "names": [
                     "not_equivalent",
                     "equivalent"
                 ],
-                "names_file": null,
-                "id": null,
                 "_type": "ClassLabel"
             },
             "idx": {
                 "dtype": "int32",
-                "id": null,
                 "_type": "Value"
             }
         },
-        "post_processed": null,
-        "supervised_keys": null,
         "builder_name": "glue",
+        "dataset_name": "glue",
         "config_name": "mrpc",
         "version": {
             "version_str": "1.0.0",
@@ -155,43 +148,28 @@
             "patch": 0
         },
         "splits": {
-            "test": {
-                "name": "test",
-                "num_bytes": 443498,
-                "num_examples": 1725,
-                "dataset_name": "glue"
-            },
             "train": {
                 "name": "train",
-                "num_bytes": 946146,
+                "num_bytes": 943843,
                 "num_examples": 3668,
-                "dataset_name": "glue"
+                "dataset_name": null
             },
             "validation": {
                 "name": "validation",
-                "num_bytes": 106142,
+                "num_bytes": 105879,
                 "num_examples": 408,
-                "dataset_name": "glue"
-            }
-        },
-        "download_checksums": {
-            "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {
-                "num_bytes": 6222,
-                "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"
-            },
-            "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {
-                "num_bytes": 1047044,
-                "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"
+                "dataset_name": null
             },
-            "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {
-                "num_bytes": 441275,
-                "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"
+            "test": {
+                "name": "test",
+                "num_bytes": 442410,
+                "num_examples": 1725,
+                "dataset_name": null
             }
         },
-        "download_size": 1494541,
-        "post_processing_size": null,
-        "dataset_size": 1495786,
-        "size_in_bytes": 2990327
+        "download_size": 1033400,
+        "dataset_size": 1492132,
+        "size_in_bytes": 2525532
     },
     "qqp": {
         "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
mrpc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a623ed1cbdf445b11f8e249acbf649d7d3a5ee58c918554c40cbd8307e488693
+size 308441
mrpc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61fd41301e0e244b0420c4350a170c8e7cf64740335fc875a4af2d79af0df0af
+size 649281
mrpc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33c007dbf5bfa8463d87a13e6226df8c0fcf2596c2cd39d0f3bb79754e00f50f
+size 75678
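The three ADDED files above are Git LFS pointer stubs; the parquet payloads themselves live in LFS storage and correspond to the `size` values shown. A quick sanity check on a locally fetched shard might look like this (pandas with a parquet engine is assumed to be installed):

```python
import pandas as pd

# After `git lfs pull` (or downloading the shard from the Hub), the file is a
# regular parquet table that can be inspected directly.
df = pd.read_parquet("mrpc/test-00000-of-00001.parquet")

print(len(df))              # expected to match num_examples for the test split (1725)
print(df.columns.tolist())  # expected: sentence1, sentence2, label, idx
```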