fix download code

- README.md +43 -43
- dataset_infos.json +0 -1
- russian_super_glue.py +9 -7
README.md
CHANGED
@@ -54,10 +54,10 @@ dataset_info:
           '1': not_entailment
   splits:
   - name: test
-    num_bytes:
+    num_bytes: 470282
     num_examples: 1104
-  download_size:
-  dataset_size:
+  download_size: 546821
+  dataset_size: 470282
 - config_name: rcb
   features:
   - name: premise
@@ -79,16 +79,16 @@ dataset_info:
           '2': neutral
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 199696
     num_examples: 438
   - name: validation
-    num_bytes:
+    num_bytes: 97977
     num_examples: 220
   - name: test
-    num_bytes:
+    num_bytes: 207015
     num_examples: 438
-  download_size:
-  dataset_size:
+  download_size: 590169
+  dataset_size: 504688
 - config_name: parus
   features:
   - name: premise
@@ -109,16 +109,16 @@ dataset_info:
           '1': choice2
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 74451
     num_examples: 400
   - name: validation
-    num_bytes:
+    num_bytes: 19381
     num_examples: 100
   - name: test
-    num_bytes:
+    num_bytes: 93176
     num_examples: 500
-  download_size:
-  dataset_size:
+  download_size: 238685
+  dataset_size: 187008
 - config_name: muserc
   features:
   - name: paragraph
@@ -143,16 +143,16 @@ dataset_info:
           '1': 'True'
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 31651131
     num_examples: 11950
   - name: validation
-    num_bytes:
+    num_bytes: 5964145
     num_examples: 2235
   - name: test
-    num_bytes:
+    num_bytes: 19850918
     num_examples: 7614
-  download_size:
-  dataset_size:
+  download_size: 5236394
+  dataset_size: 57466194
 - config_name: terra
   features:
   - name: premise
@@ -169,16 +169,16 @@ dataset_info:
           '1': not_entailment
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 1409235
     num_examples: 2616
   - name: validation
-    num_bytes:
+    num_bytes: 161477
     num_examples: 307
   - name: test
-    num_bytes:
+    num_bytes: 1713491
     num_examples: 3198
-  download_size:
-  dataset_size:
+  download_size: 3542264
+  dataset_size: 3284203
 - config_name: russe
   features:
   - name: word
@@ -209,16 +209,16 @@ dataset_info:
           '1': 'True'
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 6913256
     num_examples: 19845
   - name: validation
-    num_bytes:
+    num_bytes: 2957479
     num_examples: 8505
   - name: test
-    num_bytes:
+    num_bytes: 10045976
     num_examples: 18892
-  download_size:
-  dataset_size:
+  download_size: 24773401
+  dataset_size: 19916711
 - config_name: rwsd
   features:
   - name: text
@@ -241,16 +241,16 @@ dataset_info:
           '1': 'True'
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 132262
     num_examples: 606
   - name: validation
-    num_bytes:
+    num_bytes: 87947
     num_examples: 204
   - name: test
-    num_bytes:
+    num_bytes: 59039
     num_examples: 154
-  download_size:
-  dataset_size:
+  download_size: 366466
+  dataset_size: 279248
 - config_name: danetqa
   features:
   - name: question
@@ -267,16 +267,16 @@ dataset_info:
           '1': 'True'
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 2473998
     num_examples: 1749
   - name: validation
-    num_bytes:
+    num_bytes: 1076447
     num_examples: 821
   - name: test
-    num_bytes:
+    num_bytes: 1023054
     num_examples: 805
-  download_size:
-  dataset_size:
+  download_size: 4695018
+  dataset_size: 4573499
 - config_name: rucos
   features:
   - name: passage
@@ -295,16 +295,16 @@ dataset_info:
       dtype: int32
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 160095186
     num_examples: 72193
   - name: validation
-    num_bytes:
+    num_bytes: 16980539
     num_examples: 7577
   - name: test
-    num_bytes:
+    num_bytes: 15535185
     num_examples: 7257
-  download_size:
-  dataset_size:
+  download_size: 220047252
+  dataset_size: 192610910
 tags:
 - glue
 - qa
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-
{"lidirus": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\n\"LiDiRus (Linguistic Diagnostic for Russian) is a diagnostic dataset that covers a large volume of linguistic phenomena,\nwhile allowing you to evaluate information systems on a simple test of textual entailment recognition.\nSee more details diagnostics.\n", "citation": "\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/LiDiRus", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "knowledge": {"dtype": "string", "id": null, "_type": "Value"}, "lexical-semantics": {"dtype": "string", "id": null, "_type": "Value"}, "logic": {"dtype": "string", "id": null, "_type": "Value"}, "predicate-argument-structure": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "lidirus", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"test": {"name": "test", "num_bytes": 470306, "num_examples": 1104, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/LiDiRus": {"num_bytes": 47118, "checksum": "e1bc2c55cadbc98b0e77a43e1560af8e2fe336ab4bc372f545f69cb48a479cdc"}}, "download_size": 47118, "post_processing_size": null, "dataset_size": 470306, "size_in_bytes": 517424}, "rcb": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. 
We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nThe Russian Commitment Bank is a corpus of naturally occurring discourses whose final sentence contains\na clause-embedding predicate under an entailment canceling operator (question, modal, negation, antecedent\nof conditional).\n", "citation": "\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/RCB", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "verb": {"dtype": "string", "id": null, "_type": "Value"}, "negation": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "contradiction", "neutral"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "rcb", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 199712, "num_examples": 438, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 97993, "num_examples": 220, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 207031, "num_examples": 438, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/RCB": {"num_bytes": 136700, "checksum": "9ecc0bd0cc04e04922349212452166404b5a75b67be9e6aa996c80c64beb6ddb"}}, "download_size": 136700, "post_processing_size": null, "dataset_size": 504736, "size_in_bytes": 641436}, "parus": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nChoice of Plausible Alternatives for Russian language\nChoice of Plausible Alternatives for Russian language (PARus) evaluation provides researchers with a tool for assessing\nprogress in open-domain commonsense causal reasoning. 
Each question in PARus is composed of a premise and two\nalternatives, where the task is to select the alternative that more plausibly has a causal relation with the premise.\nThe correct alternative is randomized so that the expected performance of randomly guessing is 50%.\n", "citation": "\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/PARus", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "choice1": {"dtype": "string", "id": null, "_type": "Value"}, "choice2": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["choice1", "choice2"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "parus", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 74467, "num_examples": 400, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 19397, "num_examples": 100, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 93192, "num_examples": 500, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/PARus": {"num_bytes": 57585, "checksum": "7093c859a6ab07eab54c86cdd686b7b14afffee46abcd3e2d43876b1fdb8fa59"}}, "download_size": 57585, "post_processing_size": null, "dataset_size": 187056, "size_in_bytes": 244641}, "muserc": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nWe present a reading comprehension challenge in which questions can only be answered by taking into account information\nfrom multiple sentences. 
The dataset is the first to study multi-sentence inference at scale, with an open-ended set of\nquestion types that requires reasoning skills.\n", "citation": "@inproceedings{fenogenova-etal-2020-read,\n title = \"Read and Reason with {M}u{S}e{RC} and {R}u{C}o{S}: Datasets for Machine Reading Comprehension for {R}ussian\",\n author = \"Fenogenova, Alena and\n Mikhailov, Vladislav and\n Shevelev, Denis\",\n booktitle = \"Proceedings of the 28th International Conference on Computational Linguistics\",\n month = dec,\n year = \"2020\",\n address = \"Barcelona, Spain (Online)\",\n publisher = \"International Committee on Computational Linguistics\",\n url = \"https://aclanthology.org/2020.coling-main.570\",\n doi = \"10.18653/v1/2020.coling-main.570\",\n pages = \"6481--6497\",\n abstract = \"The paper introduces two Russian machine reading comprehension (MRC) datasets, called MuSeRC and RuCoS,\n which require reasoning over multiple sentences and commonsense knowledge to infer the answer. The former follows\n the design of MultiRC, while the latter is a counterpart of the ReCoRD dataset. The datasets are included\n in RussianSuperGLUE, the Russian general language understanding benchmark. We provide a comparative analysis\n and demonstrate that the proposed tasks are relatively more complex as compared to the original ones for English.\n Besides, performance results of human solvers and BERT-based models show that MuSeRC and RuCoS represent a challenge\n for recent advanced neural models. We thus hope to facilitate research in the field of MRC for Russian and prompt\n the study of multi-hop reasoning in a cross-lingual scenario.\",\n}\n\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/MuSeRC", "license": "", "features": {"paragraph": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"paragraph": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "int32", "id": null, "_type": "Value"}, "answer": {"dtype": "int32", "id": null, "_type": "Value"}}, "label": {"num_classes": 2, "names": ["False", "True"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "muserc", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 31651155, "num_examples": 11950, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 5964157, "num_examples": 2235, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 19850930, "num_examples": 7614, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/MuSeRC": {"num_bytes": 1196720, "checksum": "679d69df99113d8ca6416eda5583a81ab324f5438097216a416d09e745d2f668"}}, "download_size": 1196720, "post_processing_size": null, "dataset_size": 57466242, "size_in_bytes": 58662962}, 
"terra": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nTextual Entailment Recognition has been proposed recently as a generic task that captures major semantic inference\nneeds across many NLP applications, such as Question Answering, Information Retrieval, Information Extraction,\nand Text Summarization. This task requires to recognize, given two text fragments, whether the meaning of one text is\nentailed (can be inferred) from the other text.\n", "citation": "\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/TERRa", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "terra", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 1409243, "num_examples": 2616, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 161485, "num_examples": 307, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 1713499, "num_examples": 3198, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/TERRa": {"num_bytes": 907346, "checksum": "fc7320210b5b6f7087615f13558868de55f46d5e0e365d9d82968c66e6e0dba7"}}, "download_size": 907346, "post_processing_size": null, "dataset_size": 3284227, "size_in_bytes": 4191573}, "russe": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. 
We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nWiC: The Word-in-Context Dataset A reliable benchmark for the evaluation of context-sensitive word embeddings.\nDepending on its context, an ambiguous word can refer to multiple, potentially unrelated, meanings. Mainstream static\nword embeddings, such as Word2vec and GloVe, are unable to reflect this dynamic semantic nature. Contextualised word\nembeddings are an attempt at addressing this limitation by computing dynamic representations for words which can adapt\nbased on context.\nRussian SuperGLUE task borrows original data from the Russe project, Word Sense Induction and Disambiguation\nshared task (2018)\n", "citation": "@inproceedings{RUSSE2018,\n author = {Panchenko, Alexander and Lopukhina, Anastasia and Ustalov, Dmitry and Lopukhin, Konstantin and Arefyev,\n Nikolay and Leontyev, Alexey and Loukachevitch, Natalia},\n title = {{RUSSE'2018: A Shared Task on Word Sense Induction for the Russian Language}},\n booktitle = {Computational Linguistics and Intellectual Technologies:\n Papers from the Annual International Conference ``Dialogue''},\n year = {2018},\n pages = {547--564},\n url = {http://www.dialog-21.ru/media/4539/panchenkoaplusetal.pdf},\n address = {Moscow, Russia},\n publisher = {RSUH},\n issn = {2221-7932},\n language = {english},\n}\n\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/RUSSE", "license": "", "features": {"word": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "start1": {"dtype": "int32", "id": null, "_type": "Value"}, "start2": {"dtype": "int32", "id": null, "_type": "Value"}, "end1": {"dtype": "int32", "id": null, "_type": "Value"}, "end2": {"dtype": "int32", "id": null, "_type": "Value"}, "gold_sense1": {"dtype": "int32", "id": null, "_type": "Value"}, "gold_sense2": {"dtype": "int32", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["False", "True"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "russe", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 6913280, "num_examples": 19845, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 2957491, "num_examples": 8505, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 10046000, "num_examples": 18892, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/RUSSE": {"num_bytes": 3806009, "checksum": "60ecf42ea0f3893e857e0a9522ab92a2ae2ec713d1ab361f2f6f594d0f5324a5"}}, "download_size": 3806009, "post_processing_size": null, "dataset_size": 19916771, "size_in_bytes": 23722780}, 
"rwsd": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nA Winograd schema is a pair of sentences that differ in only one or two words and that contain an ambiguity that is\nresolved in opposite ways in the two sentences and requires the use of world knowledge and reasoning for its resolution.\nThe schema takes its name from a well-known example by Terry Winograd.\nThe set would then be presented as a challenge for AI programs, along the lines of the Turing test. The strengths of\nthe challenge are that it is clear-cut, in that the answer to each schema is a binary choice; vivid, in that it is\nobvious to non-experts that a program that fails to get the right answers clearly has serious gaps in its understanding;\nand difficult, in that it is far beyond the current state of the art.\n", "citation": "\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/RWSD", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "span1_index": {"dtype": "int32", "id": null, "_type": "Value"}, "span2_index": {"dtype": "int32", "id": null, "_type": "Value"}, "span1_text": {"dtype": "string", "id": null, "_type": "Value"}, "span2_text": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["False", "True"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "rwsd", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 132274, "num_examples": 606, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 87959, "num_examples": 204, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 59051, "num_examples": 154, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/RWSD": {"num_bytes": 40508, "checksum": "65894cf114f022a0469bbd535f045d50880e03aa822cd1f3693a54b3665fa962"}}, "download_size": 40508, "post_processing_size": null, "dataset_size": 279284, "size_in_bytes": 319792}, "danetqa": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural 
language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nDaNetQA is a question answering dataset for yes/no questions. These questions are naturally occurring -- they are\ngenerated in unprompted and unconstrained settings.\n\nEach example is a triplet of (question, passage, answer), with the title of the page as optional additional context.\nThe text-pair classification setup is similar to existing natural language inference tasks.\n\nBy sampling questions from a distribution of information-seeking queries (rather than prompting annotators for\ntext pairs), we observe significantly more challenging examples compared to existing NLI datasets.\n", "citation": "@InProceedings{10.1007/978-3-030-72610-2_4,\nauthor=\"Glushkova, Taisia\nand Machnev, Alexey\nand Fenogenova, Alena\nand Shavrina, Tatiana\nand Artemova, Ekaterina\nand Ignatov, Dmitry I.\",\neditor=\"van der Aalst, Wil M. P.\nand Batagelj, Vladimir\nand Ignatov, Dmitry I.\nand Khachay, Michael\nand Koltsova, Olessia\nand Kutuzov, Andrey\nand Kuznetsov, Sergei O.\nand Lomazova, Irina A.\nand Loukachevitch, Natalia\nand Napoli, Amedeo\nand Panchenko, Alexander\nand Pardalos, Panos M.\nand Pelillo, Marcello\nand Savchenko, Andrey V.\nand Tutubalina, Elena\",\ntitle=\"DaNetQA: A Yes/No Question Answering Dataset for the Russian Language\",\nbooktitle=\"Analysis of Images, Social Networks and Texts\",\nyear=\"2021\",\npublisher=\"Springer International Publishing\",\naddress=\"Cham\",\npages=\"57--68\",\nabstract=\"DaNetQA, a new question-answering corpus, follows BoolQ\u00a0[2] design: it comprises natural yes/no questions.\nEach question is paired with a paragraph from Wikipedia and an answer, derived from the paragraph. The task is to take\nboth the question and a paragraph as input and come up with a yes/no answer, i.e. to produce a binary output. In this\npaper, we present a reproducible approach to DaNetQA creation and investigate transfer learning methods for task and\nlanguage transferring. For task transferring we leverage three similar sentence modelling tasks: 1) a corpus of\nparaphrases, Paraphraser, 2) an NLI task, for which we use the Russian part of XNLI, 3) another question answering task,\nSberQUAD. 
For language transferring we use English to Russian translation together\nwith multilingual language fine-tuning.\",\nisbn=\"978-3-030-72610-2\"\n}\n\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/DaNetQA", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "passage": {"dtype": "string", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["False", "True"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "danetqa", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 2474006, "num_examples": 1749, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 1076455, "num_examples": 821, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 1023062, "num_examples": 805, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/DaNetQA": {"num_bytes": 1293761, "checksum": "b5b4bcfe17e1eb16aa13a7aab4ca088871e27b0851468e9a07b9b528bb42fb96"}}, "download_size": 1293761, "post_processing_size": null, "dataset_size": 4573523, "size_in_bytes": 5867284}, "rucos": {"description": "Recent advances in the field of universal language models and transformers require the development of a methodology for\ntheir broad diagnostics and testing for general intellectual skills - detection of natural language inference,\ncommonsense reasoning, ability to perform simple logical operations regardless of text subject or lexicon. For the first\ntime, a benchmark of nine tasks, collected and organized analogically to the SuperGLUE methodology, was developed from\nscratch for the Russian language. We provide baselines, human level evaluation, an open-source framework for evaluating\nmodels and an overall leaderboard of transformer models for the Russian language.\nRussian reading comprehension with Commonsense reasoning (RuCoS) is a large-scale reading comprehension dataset which\nrequires commonsense reasoning. RuCoS consists of queries automatically generated from CNN/Daily Mail news articles;\nthe answer to each query is a text span from a summarizing passage of the corresponding news. 
The goal of RuCoS is to\nevaluate a machine`s ability of commonsense reasoning in reading comprehension.\n", "citation": "@inproceedings{fenogenova-etal-2020-read,\n title = \"Read and Reason with {M}u{S}e{RC} and {R}u{C}o{S}: Datasets for Machine Reading Comprehension for {R}ussian\",\n author = \"Fenogenova, Alena and\n Mikhailov, Vladislav and\n Shevelev, Denis\",\n booktitle = \"Proceedings of the 28th International Conference on Computational Linguistics\",\n month = dec,\n year = \"2020\",\n address = \"Barcelona, Spain (Online)\",\n publisher = \"International Committee on Computational Linguistics\",\n url = \"https://aclanthology.org/2020.coling-main.570\",\n doi = \"10.18653/v1/2020.coling-main.570\",\n pages = \"6481--6497\",\n abstract = \"The paper introduces two Russian machine reading comprehension (MRC) datasets, called MuSeRC and RuCoS,\n which require reasoning over multiple sentences and commonsense knowledge to infer the answer. The former follows\n the design of MultiRC, while the latter is a counterpart of the ReCoRD dataset. The datasets are included\n in RussianSuperGLUE, the Russian general language understanding benchmark. We provide a comparative analysis\n and demonstrate that the proposed tasks are relatively more complex as compared to the original ones for English.\n Besides, performance results of human solvers and BERT-based models show that MuSeRC and RuCoS represent a challenge\n for recent advanced neural models. We thus hope to facilitate research in the field of MRC for Russian and prompt\n the study of multi-hop reasoning in a cross-lingual scenario.\",\n}\n\n@article{shavrina2020russiansuperglue,\n title={RussianSuperGLUE: A Russian Language Understanding Evaluation Benchmark},\n author={Shavrina, Tatiana and Fenogenova, Alena and Emelyanov, Anton and Shevelev, Denis and Artemova,\n Ekaterina and Malykh, Valentin and Mikhailov, Vladislav and Tikhonova, Maria and Chertok, Andrey and\n Evlampiev, Andrey},\n journal={arXiv preprint arXiv:2010.15925},\n year={2020}\n }\n", "homepage": "https://russiansuperglue.com/tasks/task_info/RuCoS", "license": "", "features": {"passage": {"dtype": "string", "id": null, "_type": "Value"}, "query": {"dtype": "string", "id": null, "_type": "Value"}, "entities": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "idx": {"passage": {"dtype": "int32", "id": null, "_type": "Value"}, "query": {"dtype": "int32", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "russian_super_glue", "config_name": "rucos", "version": {"version_str": "0.0.1", "description": null, "major": 0, "minor": 0, "patch": 1}, "splits": {"train": {"name": "train", "num_bytes": 160095378, "num_examples": 72193, "dataset_name": "russian_super_glue"}, "validation": {"name": "validation", "num_bytes": 16980563, "num_examples": 7577, "dataset_name": "russian_super_glue"}, "test": {"name": "test", "num_bytes": 15535209, "num_examples": 7257, "dataset_name": "russian_super_glue"}}, "download_checksums": {"https://russiansuperglue.com/tasks/download/RuCoS": {"num_bytes": 56208297, "checksum": "e2f42700122e79cfcce792b54df792630f033eb21e14863cede5852f3aa0d078"}}, "download_size": 56208297, "post_processing_size": null, "dataset_size": 192611150, "size_in_bytes": 248819447}}
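Deleting dataset_infos.json also drops the recorded download checksums, which would no longer match once the loader fetches individual `.jsonl` files instead of the original archives; the split metadata now lives only in the README YAML above. If it ever needs to be regenerated, the recomputed sizes are exposed on the loaded dataset's info object; a sketch, under the same repo-id assumption as before:

```python
from datasets import load_dataset

ds = load_dataset("russian_super_glue", "rcb")
info = ds["train"].info  # datasets.DatasetInfo for the rcb config

# SplitInfo objects carry the recomputed per-split sizes.
print({name: (s.num_examples, s.num_bytes) for name, s in info.splits.items()})
# Aggregate sizes backing download_size/dataset_size in the README.
print(info.download_size, info.dataset_size)
```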
russian_super_glue.py
CHANGED
@@ -394,39 +394,41 @@ class RussianSuperGlue(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
-        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
-        task_name = _get_task_name_from_data_url(self.config.data_url)
-        dl_dir = os.path.join(dl_dir, task_name)
         if self.config.name == "lidirus":
+            dl = dl_manager.download_and_extract(self.config.data_url + "/LiDiRus.jsonl")
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": dl,
                         "split": datasets.Split.TEST,
                     },
                 ),
             ]
         else:
+            train = dl_manager.download_and_extract(self.config.data_url + "/train.jsonl")
+            test = dl_manager.download_and_extract(self.config.data_url + "/test.jsonl")
+            val = dl_manager.download_and_extract(self.config.data_url + "/val.jsonl")
+
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": train,
                         "split": datasets.Split.TRAIN,
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": val,
                         "split": datasets.Split.VALIDATION,
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": test,
                         "split": datasets.Split.TEST,
                     },
                 ),
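The fix drops the archive-plus-task-directory download in favour of fetching each split's `.jsonl` file directly from `data_url`. Since `DownloadManager.download_and_extract` also maps over a dict of URLs and returns local paths under the same keys, the non-lidirus branch could equivalently be written in one call; a hypothetical batched variant (not the committed code), assuming the same `data_url` layout:

```python
import datasets


def _split_generators(self, dl_manager: datasets.DownloadManager):
    # Hypothetical batched form of the committed change: download_and_extract
    # accepts a dict of URLs and returns a dict of local file paths.
    files = dl_manager.download_and_extract(
        {
            "train": self.config.data_url + "/train.jsonl",
            "validation": self.config.data_url + "/val.jsonl",
            "test": self.config.data_url + "/test.jsonl",
        }
    )
    return [
        datasets.SplitGenerator(
            name=split,
            gen_kwargs={"data_file": files[key], "split": split},
        )
        for key, split in [
            ("train", datasets.Split.TRAIN),
            ("validation", datasets.Split.VALIDATION),
            ("test", datasets.Split.TEST),
        ]
    ]
```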