matteogabburo committed: Update README.md
README.md CHANGED
````diff
@@ -77,29 +77,29 @@ configs:
     path: "por_test_clean.jsonl"
   - split: test_clean_sp
     path: "spa_test_clean.jsonl"
-  - split:
+  - split: validation_noneg_en
     path: "eng_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_de
     path: "deu_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_fr
     path: "fra_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_it
     path: "ita_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_po
     path: "por_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_sp
     path: "spa_dev_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_en
     path: "eng_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_de
     path: "deu_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_fr
     path: "fra_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_it
     path: "ita_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_po
     path: "por_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_sp
     path: "spa_test_no_allneg.jsonl"
 - config_name: clean
   data_files:
@@ -139,7 +139,7 @@ configs:
     path: "por_test_clean.jsonl"
   - split: test_clean_sp
     path: "spa_test_clean.jsonl"
-- config_name:
+- config_name: noneg
   data_files:
   - split: train_en
     path: "eng_train.jsonl"
@@ -153,29 +153,29 @@ configs:
     path: "por_train.jsonl"
   - split: train_sp
     path: "spa_train.jsonl"
-  - split:
+  - split: validation_noneg_en
     path: "eng_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_de
     path: "deu_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_fr
     path: "fra_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_it
     path: "ita_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_po
     path: "por_dev_no_allneg.jsonl"
-  - split:
+  - split: validation_noneg_sp
     path: "spa_dev_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_en
     path: "eng_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_de
     path: "deu_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_fr
     path: "fra_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_it
     path: "ita_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_po
     path: "por_test_no_allneg.jsonl"
-  - split:
+  - split: test_noneg_sp
     path: "spa_test_no_allneg.jsonl"
 - config_name: en
   data_files:
@@ -225,7 +225,7 @@ configs:
     path: "spa_dev.jsonl"
   - split: test
     path: "spa_test.jsonl"
-- config_name:
+- config_name: en_noneg
   data_files:
   - split: train
     path: "eng_train.jsonl"
@@ -233,7 +233,7 @@ configs:
     path: "eng_dev_no_allneg.jsonl"
   - split: test
     path: "eng_test_no_allneg.jsonl"
-- config_name:
+- config_name: de_noneg
   data_files:
   - split: train
     path: "deu_train.jsonl"
@@ -241,7 +241,7 @@ configs:
     path: "deu_dev_no_allneg.jsonl"
   - split: test
     path: "deu_test_no_allneg.jsonl"
-- config_name:
+- config_name: fr_noneg
   data_files:
   - split: train
     path: "fra_train.jsonl"
@@ -249,7 +249,7 @@ configs:
     path: "fra_dev_no_allneg.jsonl"
   - split: test
     path: "fra_test_no_allneg.jsonl"
-- config_name:
+- config_name: it_noneg
   data_files:
   - split: train
     path: "ita_train.jsonl"
@@ -257,7 +257,7 @@ configs:
     path: "ita_dev_no_allneg.jsonl"
   - split: test
     path: "ita_test_no_allneg.jsonl"
-- config_name:
+- config_name: po_noneg
   data_files:
   - split: train
     path: "por_train.jsonl"
@@ -265,7 +265,7 @@ configs:
     path: "por_dev_no_allneg.jsonl"
   - split: test
     path: "por_test_no_allneg.jsonl"
-- config_name:
+- config_name: sp_noneg
   data_files:
   - split: train
     path: "spa_train.jsonl"
@@ -338,11 +338,11 @@ For each language (English, French, German, Italian, Portuguese, and Spanish), w
 
 In addition, the validation and the test splits are available also in the following preprocessed versions:
 
--
+- **noneg**: without questions with only negative answer candidates
 - **clean**: without questions with only negative and only positive answer candidates
 
 ### How to load them:
-To use these splits, you can use the following snippet of code replacing ``[LANG]`` with a language identifier (en, fr, de, it, po, sp), and ``[VERSION]`` with the version identifier (
+To use these splits, you can use the following snippet of code replacing ``[LANG]`` with a language identifier (en, fr, de, it, po, sp), and ``[VERSION]`` with the version identifier (noneg, clean)
 
 ```
 from datasets import load_dataset
@@ -354,7 +354,7 @@ corpora = load_dataset("matteogabburo/mWikiQA")
 corpora = load_dataset("matteogabburo/mWikiQA", "clean")
 
 # if you want the "no all negatives" validation and test sets
-corpora = load_dataset("matteogabburo/mWikiQA", "
+corpora = load_dataset("matteogabburo/mWikiQA", "noneg")
 
 """
 if you want the default splits of a specific language, replace [LANG] with an identifier in: en, fr, de, it, po, sp
@@ -365,7 +365,7 @@ italian_dataset = load_dataset("matteogabburo/mWikiQA", "it")
 
 
 """
-if you want the processed splits ("clean" and "no all negatives" sets), replace [LANG] with a language identifier and [VERSION] with "
+if you want the processed splits ("clean" and "no all negatives" sets), replace [LANG] with a language identifier and [VERSION] with "noneg" or "clean"
 dataset = load_dataset("matteogabburo/mWikiQA", "[LANG]_[VERSION]")
 """
 # example:
````
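Taken together, the diff settles on `noneg` as the identifier for the "no all negatives" configuration, both as a pooled multilingual config and as per-language variants (`en_noneg`, `de_noneg`, `fr_noneg`, `it_noneg`, `po_noneg`, `sp_noneg`). The following is a minimal sketch of how the updated configs would be addressed with the `datasets` library, assuming the YAML above is merged as shown; the config and split names come directly from the diff, and the printed output is illustrative only.

```python
from datasets import load_dataset

# Pooled "noneg" config: the train_* splits plus the renamed
# validation_noneg_* and test_noneg_* splits from the YAML above.
corpora = load_dataset("matteogabburo/mWikiQA", "noneg")
print(corpora["validation_noneg_en"][0])  # first English dev candidate

# Per-language "[LANG]_[VERSION]" pattern, here English + noneg; these
# splits are backed by eng_train.jsonl, eng_dev_no_allneg.jsonl, and
# eng_test_no_allneg.jsonl per the YAML above.
english = load_dataset("matteogabburo/mWikiQA", "en_noneg")
print(english)  # DatasetDict keyed by this config's split names
```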
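To make the preprocessing concrete: the `noneg` sets keep only questions that have at least one positive answer candidate, while `clean` additionally drops questions whose candidates are all positive. Below is a rough, hypothetical reimplementation of the `noneg` filter over a default per-language config; the field names `question` and `label` and the `validation` split key are assumptions about the JSONL schema, not something this commit specifies.

```python
from collections import defaultdict
from datasets import load_dataset

dataset = load_dataset("matteogabburo/mWikiQA", "en")

def drop_all_negative_questions(split):
    # First pass: record whether each question has at least one positive
    # candidate ("label" == 1 marking a correct candidate is an assumed
    # convention, not confirmed by this diff).
    has_positive = defaultdict(bool)
    for row in split:
        has_positive[row["question"]] |= row["label"] == 1
    # Second pass: keep only candidates whose question has a positive.
    return split.filter(lambda row: has_positive[row["question"]])

validation_noneg = drop_all_negative_questions(dataset["validation"])
```

Extending the same idea to the `clean` version would also track questions whose candidates are all positive and drop those as well.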