Multilinguality: multilingual
Size Categories: 1K<n<10K
Annotations Creators: crowdsourced
albertvillanova (HF staff) committed
Commit: 4cefaff
Parent: 7a0a828

Delete loading script

Files changed (1)
  1. xcsr.py +0 -192
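
With the loading script deleted, load_dataset serves this dataset from the data files hosted on the Hub instead of executing xcsr.py. A minimal sketch of the call, assuming the dataset stays reachable under the xcsr dataset id and keeps the config names the deleted script defined (f"{subset}-{language}", e.g. "X-CSQA-en"):

from datasets import load_dataset

# Config names follow the deleted script's BuilderConfig naming, f"{subset}-{language}",
# e.g. "X-CSQA-en" or "X-CODAH-de"; the bare "xcsr" dataset id is an assumption.
ds = load_dataset("xcsr", "X-CSQA-en")
print(ds["validation"][0]["question"]["stem"])

The splits exposed this way should match the ones the deleted script generated: "validation" and "test".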
xcsr.py DELETED
@@ -1,192 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """XCSR: A dataset for cross-lingual commonsense reasoning."""
-
-
- import json
- import os
-
- import datasets
-
-
- _CITATION = """\
- # X-CSR
- @inproceedings{lin-etal-2021-common,
-     title = "Common Sense Beyond {E}nglish: Evaluating and Improving Multilingual Language Models for Commonsense Reasoning",
-     author = "Lin, Bill Yuchen and
-       Lee, Seyeon and
-       Qiao, Xiaoyang and
-       Ren, Xiang",
-     booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
-     month = aug,
-     year = "2021",
-     address = "Online",
-     publisher = "Association for Computational Linguistics",
-     url = "https://aclanthology.org/2021.acl-long.102",
-     doi = "10.18653/v1/2021.acl-long.102",
-     pages = "1274--1287",
- }
-
- # CSQA
- @inproceedings{Talmor2019commonsenseqaaq,
-     address = {Minneapolis, Minnesota},
-     author = {Talmor, Alon and Herzig, Jonathan and Lourie, Nicholas and Berant, Jonathan},
-     booktitle = {Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)},
-     doi = {10.18653/v1/N19-1421},
-     pages = {4149--4158},
-     publisher = {Association for Computational Linguistics},
-     title = {CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge},
-     url = {https://www.aclweb.org/anthology/N19-1421},
-     year = {2019}
- }
-
- # CODAH
- @inproceedings{Chen2019CODAHAA,
-     address = {Minneapolis, USA},
-     author = {Chen, Michael and D{'}Arcy, Mike and Liu, Alisa and Fernandez, Jared and Downey, Doug},
-     booktitle = {Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for {NLP}},
-     doi = {10.18653/v1/W19-2008},
-     pages = {63--69},
-     publisher = {Association for Computational Linguistics},
-     title = {CODAH: An Adversarially-Authored Question Answering Dataset for Common Sense},
-     url = {https://www.aclweb.org/anthology/W19-2008},
-     year = {2019}
- }
- """
-
- _DESCRIPTION = """\
- To evaluate multi-lingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and test in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, to 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although might contain noise, can serve as a starting benchmark for us to obtain meaningful analysis, before more human-translated datasets will be available in the future.
- """
-
- _HOMEPAGE = "https://inklab.usc.edu//XCSR/"
-
- # TODO: Add the licence for the dataset here if you can find it
- # _LICENSE = ""
-
- # The HuggingFace dataset library don't host the datasets but only point to the original files
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-
- _URL = "https://inklab.usc.edu/XCSR/xcsr_datasets.zip"
-
- _LANGUAGES = ("en", "zh", "de", "es", "fr", "it", "jap", "nl", "pl", "pt", "ru", "ar", "vi", "hi", "sw", "ur")
-
-
- class XcsrConfig(datasets.BuilderConfig):
-     """BuilderConfig for XCSR."""
-
-     def __init__(self, subset: str, language: str, **kwargs):
-         """BuilderConfig for XCSR.
-         Args:
-           language: One of {en, zh, de, es, fr, it, jap, nl, pl, pt, ru, ar, vi, hi, sw, ur}, or all_languages
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super().__init__(name=f"{subset}-{language}", **kwargs)
-         self.subset = subset
-         self.language = language
-
-
- class Xcsr(datasets.GeneratorBasedBuilder):
-     """XCSR: A dataset for evaluating multi-lingual language models (ML-LMs) for commonsense reasoning in a
-     cross-lingual zero-shot transfer setting"""
-
-     BUILDER_CONFIG_CLASS = XcsrConfig
-     BUILDER_CONFIGS = [
-         XcsrConfig(
-             subset="X-CSQA",
-             language=lang,
-             version=datasets.Version("1.1.0", ""),
-             description=f"Plain text import of X-CSQA for the {lang} language",
-         )
-         for lang in _LANGUAGES
-     ] + [
-         XcsrConfig(
-             subset="X-CODAH",
-             language=lang,
-             version=datasets.Version("1.1.0", ""),
-             description=f"Plain text import of X-CODAH for the {lang} language",
-         )
-         for lang in _LANGUAGES
-     ]
-
-     def _info(self):
-         if self.config.subset == "X-CSQA":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "lang": datasets.Value("string"),
-                     "question": {
-                         "stem": datasets.Value("string"),
-                         "choices": datasets.features.Sequence(
-                             {
-                                 "label": datasets.Value("string"),
-                                 "text": datasets.Value("string"),
-                             }
-                         ),
-                     },
-                     "answerKey": datasets.Value("string"),
-                 }
-             )
-         elif self.config.subset == "X-CODAH":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "lang": datasets.Value("string"),
-                     "question_tag": datasets.Value("string"),
-                     "question": {
-                         "stem": datasets.Value("string"),
-                         "choices": datasets.features.Sequence(
-                             {
-                                 "label": datasets.Value("string"),
-                                 "text": datasets.Value("string"),
-                             }
-                         ),
-                     },
-                     "answerKey": datasets.Value("string"),
-                 }
-             )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         data_dir = dl_manager.download_and_extract(_URL)
-         filepath = os.path.join(data_dir, "X-CSR_datasets", self.config.subset, self.config.language, "{split}.jsonl")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": filepath.format(split="test"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": filepath.format(split="dev"),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples as (key, example) tuples."""
-         with open(filepath, encoding="utf-8") as f:
-             for key, row in enumerate(f):
-                 data = json.loads(row)
-                 _ = data.setdefault("answerKey", "")
-                 yield key, data
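
For reference, the download-and-parse logic of the deleted builder can be reproduced outside the datasets library. The URL, the X-CSR_datasets/<subset>/<language>/{dev,test}.jsonl layout, and the empty answerKey default all come from the script above; the function name and the in-memory zip handling are illustrative assumptions, a sketch rather than an official replacement:

import io
import json
import urllib.request
import zipfile

_URL = "https://inklab.usc.edu/XCSR/xcsr_datasets.zip"


def read_xcsr(subset="X-CSQA", language="en", split="dev"):
    """Yield (key, example) pairs, mirroring the deleted _generate_examples."""
    # Fetch the official archive that the deleted _split_generators downloaded.
    with urllib.request.urlopen(_URL) as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))
    # Same path the deleted script joined together: X-CSR_datasets/<subset>/<language>/<split>.jsonl
    member = f"X-CSR_datasets/{subset}/{language}/{split}.jsonl"
    with archive.open(member) as f:
        for key, row in enumerate(f):
            data = json.loads(row)
            data.setdefault("answerKey", "")  # answerKey may be absent, e.g. in test files
            yield key, data

For example, next(read_xcsr("X-CODAH", "de", "test")) returns the first German X-CODAH test example.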