Multilinguality: multilingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
Tags:
License:
pasinit committed
Commit 0f54849 · Parent(s): 0d4aa4d

dataset loading script

Files changed (1):
  1. xlwic.py +231 -0
xlwic.py ADDED
@@ -0,0 +1,231 @@
+ import os
+ from dataclasses import dataclass
+
+ import datasets
+ from datasets.info import DatasetInfo
+ from datasets.utils.download_manager import DownloadManager
+
+ _DESCRIPTION = """A system's task on any of the XL-WiC datasets is to identify the intended meaning of a word in context, in a given language. XL-WiC is framed as a binary classification task. Each instance in XL-WiC has a target word w, either a verb or a noun, for which two contexts are provided. Each of these contexts triggers a specific meaning of w. The task is to identify whether the occurrences of w in the two contexts correspond to the same meaning or not.
+
+ XL-WiC provides dev and test sets in the following 12 languages:
+
+ Bulgarian (BG)
+ Danish (DA)
+ German (DE)
+ Estonian (ET)
+ Farsi (FA)
+ French (FR)
+ Croatian (HR)
+ Italian (IT)
+ Japanese (JA)
+ Korean (KO)
+ Dutch (NL)
+ Chinese (ZH)
+ and training sets in the following 3 languages:
+
+ German (DE)
+ French (FR)
+ Italian (IT)
+ """
+ _CITATION = """@inproceedings{raganato-etal-2020-xl-wic,
+     title={XL-WiC: A Multilingual Benchmark for Evaluating Semantic Contextualization},
+     author={Raganato, Alessandro and Pasini, Tommaso and Camacho-Collados, Jose and Pilehvar, Mohammad Taher},
+     booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
+     pages={7193--7206},
+     year={2020}
+ }
+ """
+ _DOWNLOAD_URL = "https://pilehvar.github.io/xlwic/data/xlwic_datasets.zip"
+ _VERSION = "1.0.0"
+ _WN_LANGS = ["EN", "BG", "ZH", "HR", "DA", "NL", "ET", "FA", "JA", "KO"]
+ _WIKT_LANGS = ["IT", "FR", "DE"]
+ _CODE_TO_LANG_ID = {
+     "EN": "english",
+     "BG": "bulgarian",
+     "ZH": "chinese",
+     "HR": "croatian",
+     "DA": "danish",
+     "NL": "dutch",
+     "ET": "estonian",
+     "FA": "farsi",
+     "JA": "japanese",
+     "KO": "korean",
+     "IT": "italian",
+     "FR": "french",
+     "DE": "german",
+ }
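+ # Cross-lingual configs pair English training data with every non-English
+ # dev/test language; IT, FR, and DE additionally ship their own training sets.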
+ _AVAILABLE_PAIRS = (
+     list(zip(["EN"] * (len(_WN_LANGS) - 1), _WN_LANGS[1:]))
+     + list(zip(["EN"] * len(_WIKT_LANGS), _WIKT_LANGS))
+     + [("IT", "IT"), ("FR", "FR"), ("DE", "DE")]
+ )
+
+ @dataclass
+ class XLWiCConfig(datasets.BuilderConfig):
+     version: datasets.Version = None
+     training_lang: str = None
+     target_lang: str = None
+     name: str = None
+
+
+ class XLWIC(datasets.GeneratorBasedBuilder):
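+     # One config per (training language, target language) pair, named
+     # "xlwic_{source}_{target}", e.g. "xlwic_en_bg" or "xlwic_it_it".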
+     BUILDER_CONFIGS = [
+         XLWiCConfig(
+             name=f"xlwic_{source.lower()}_{target.lower()}",
+             training_lang=source,
+             target_lang=target,
+             version=datasets.Version(_VERSION, ""),
+         )
+         for source, target in _AVAILABLE_PAIRS
+     ]
+
+     def _info(self) -> DatasetInfo:
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "context_1": datasets.Value("string"),
+                     "context_2": datasets.Value("string"),
+                     "target_word": datasets.Value("string"),
+                     "pos": datasets.Value("string"),
+                     "target_word_location_1": {
+                         "char_start": datasets.Value("int32"),
+                         "char_end": datasets.Value("int32"),
+                     },
+                     "target_word_location_2": {
+                         "char_start": datasets.Value("int32"),
+                         "char_end": datasets.Value("int32"),
+                     },
+                     "language": datasets.Value("string"),
+                     "label": datasets.Value("int32"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://pilehvar.github.io/xlwic/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         downloaded_file = dl_manager.download_and_extract(_DOWNLOAD_URL)
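+         # The archive unpacks to a single "xlwic_datasets" folder holding the
+         # wic_english/, xlwic_wn/, and xlwic_wikt/ subfolders used below.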
+         dataset_root_folder = os.path.join(downloaded_file, "xlwic_datasets")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples.
+                 # Training always uses the config's training language.
+                 gen_kwargs={
+                     "dataset_root": dataset_root_folder,
+                     "lang": self.config.training_lang,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # Validation and test use the config's target language.
+                 gen_kwargs={
+                     "dataset_root": dataset_root_folder,
+                     "lang": self.config.target_lang,
+                     "split": "valid",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "dataset_root": dataset_root_folder,
+                     "lang": self.config.target_lang,
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _yield_from_lines(self, lines, lang):
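+         # Each input line is a tab-separated record: target word, POS tag,
+         # character offsets of the target in both contexts, the two context
+         # sentences, and the gold label ("0" or "1").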
+         for i, (
+             tw,
+             pos,
+             char_start_1,
+             char_end_1,
+             char_start_2,
+             char_end_2,
+             context_1,
+             context_2,
+             label,
+         ) in enumerate(lines):
+             _id = f"{lang}_{i}"
+             yield _id, {
+                 "id": _id,
+                 "target_word": tw,
+                 "context_1": context_1,
+                 "context_2": context_2,
+                 "label": int(label),
+                 "target_word_location_1": {
+                     "char_start": int(char_start_1),
+                     "char_end": int(char_end_1),
+                 },
+                 "target_word_location_2": {
+                     "char_start": int(char_start_2),
+                     "char_end": int(char_end_2),
+                 },
+                 "pos": pos,
+                 "language": lang,
+             }
+
+     def _from_selfcontained_file(self, dataset_root, lang, split):
+         # Train and validation files carry the gold label in the last column.
+         ext_lang = _CODE_TO_LANG_ID[lang]
+         if lang in _WIKT_LANGS:
+             path = os.path.join(
+                 dataset_root,
+                 "xlwic_wikt",
+                 f"{ext_lang}_{lang.lower()}",
+                 f"{lang.lower()}_{split}.txt",
+             )
+         elif lang != "EN" and lang in _WN_LANGS:
+             path = os.path.join(
+                 dataset_root,
+                 "xlwic_wn",
+                 f"{ext_lang}_{lang.lower()}",
+                 f"{lang.lower()}_{split}.txt",
+             )
+         elif lang == "EN" and lang in _WN_LANGS:
+             path = os.path.join(
+                 dataset_root, "wic_english", f"{split}_{lang.lower()}.txt"
+             )
+         with open(path, encoding="utf-8") as lines:
+             all_lines = [line.strip().split("\t") for line in lines]
+         yield from self._yield_from_lines(all_lines, lang)
+
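+     # Test sentences live in <lang>_test_data.txt, while the gold labels sit
+     # in a parallel <lang>_test_gold.txt with one label per line.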
+     def _from_test_files(self, dataset_root, lang, split):
+         ext_lang = _CODE_TO_LANG_ID[lang]
+         if lang in _WIKT_LANGS:
+             path_data = os.path.join(
+                 dataset_root,
+                 "xlwic_wikt",
+                 f"{ext_lang}_{lang.lower()}",
+                 f"{lang.lower()}_{split}_data.txt",
+             )
+         elif lang != "EN" and lang in _WN_LANGS:
+             path_data = os.path.join(
+                 dataset_root,
+                 "xlwic_wn",
+                 f"{ext_lang}_{lang.lower()}",
+                 f"{lang.lower()}_{split}_data.txt",
+             )
+         path_gold = path_data.replace("_data.txt", "_gold.txt")
+         with open(path_data, encoding="utf-8") as lines:
+             all_lines = [line.strip().split("\t") for line in lines]
+         with open(path_gold, encoding="utf-8") as lines:
+             all_labels = [line.strip() for line in lines]
+         # Re-attach each gold label as the last column so the records match
+         # the self-contained format expected by _yield_from_lines.
+         for line, label in zip(all_lines, all_labels):
+             line.append(label)
+         yield from self._yield_from_lines(all_lines, lang)
+
+
+     def _generate_examples(self, dataset_root, lang, split, **kwargs):
+         if split in {"train", "valid"}:
+             yield from self._from_selfcontained_file(dataset_root, lang, split)
+         else:
+             yield from self._from_test_files(dataset_root, lang, split)
+
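
Once this script is on the Hub, the loader can be exercised with datasets.load_dataset. A minimal sketch, assuming the repository id is "pasinit/xlwic" (inferred from the committer name; an assumption, not confirmed by this commit):

    from datasets import load_dataset

    # Pick a config: English training data, German dev/test.
    data = load_dataset("pasinit/xlwic", "xlwic_en_de")

    # Each record has the target word, POS, two contexts, char offsets,
    # and a 0/1 same-meaning label.
    sample = data["validation"][0]
    print(sample["target_word"], sample["label"])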