Tasks: Text Classification
Modalities: Text
Sub-tasks: semantic-similarity-classification
Size: 100K - 1M
License:
import os
from dataclasses import dataclass

import datasets
from datasets import DatasetInfo, DownloadManager
_DESCRIPTION = """A system's task on any of the XL-WiC datasets is to identify the intended meaning of a word in the context of a given language. XL-WiC is framed as a binary classification task. Each instance in XL-WiC has a target word w, either a verb or a noun, for which two contexts are provided. Each of these contexts triggers a specific meaning of w. The task is to identify whether the occurrences of w in the two contexts correspond to the same meaning or not.
XL-WiC provides dev and test sets in the following 12 languages:
Bulgarian (BG)
Danish (DA)
German (DE)
Estonian (ET)
Farsi (FA)
French (FR)
Croatian (HR)
Italian (IT)
Japanese (JA)
Korean (KO)
Dutch (NL)
Chinese (ZH)
and training sets in the following 3 languages:
German (DE)
French (FR)
Italian (IT)
"""
_CITATION = """@inproceedings{raganato-etal-2020-xl-wic,
    title={XL-WiC: A Multilingual Benchmark for Evaluating Semantic Contextualization},
    author={Raganato, Alessandro and Pasini, Tommaso and Camacho-Collados, Jose and Pilehvar, Mohammad Taher},
    booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
    pages={7193--7206},
    year={2020}
}
"""
_DOWNLOAD_URL = "https://pilehvar.github.io/xlwic/data/xlwic_datasets.zip"
_VERSION = "1.0.0"
_WN_LANGS = ["EN", "BG", "ZH", "HR", "DA", "NL", "ET", "FA", "JA", "KO"]
_WIKT_LANGS = ["IT", "FR", "DE"]
_CODE_TO_LANG_ID = {
    "EN": "english",
    "BG": "bulgarian",
    "ZH": "chinese",
    "HR": "croatian",
    "DA": "danish",
    "NL": "dutch",
    "ET": "estonian",
    "FA": "farsi",
    "JA": "japanese",
    "KO": "korean",
    "IT": "italian",
    "FR": "french",
    "DE": "german",
}
_AVAILABLE_PAIRS = (
    list(zip(["EN"] * (len(_WN_LANGS) - 1), _WN_LANGS[1:]))
    + list(zip(["EN"] * len(_WIKT_LANGS), _WIKT_LANGS))
    + [("IT", "IT"), ("FR", "FR"), ("DE", "DE")]
)
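# The pairs above expand to config names such as "xlwic_en_bg", "xlwic_en_ja"
# and "xlwic_en_de", plus the three monolingual configs "xlwic_it_it",
# "xlwic_fr_fr" and "xlwic_de_de": training data comes from the first language
# of each pair, dev/test data from the second.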
# The @dataclass decorator is required so that training_lang and target_lang
# become constructor arguments; without it, instantiating the configs in
# BUILDER_CONFIGS below would raise a TypeError.
@dataclass
class XLWiCConfig(datasets.BuilderConfig):
    name: str = None
    version: str = None
    training_lang: str = None
    target_lang: str = None
class XLWIC(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        XLWiCConfig(
            name=f"xlwic_{source.lower()}_{target.lower()}",
            training_lang=source,
            target_lang=target,
            version=datasets.Version(_VERSION, ""),
        )
        for source, target in _AVAILABLE_PAIRS
    ]
    def _info(self) -> DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "context_1": datasets.Value("string"),
                    "context_2": datasets.Value("string"),
                    "target_word": datasets.Value("string"),
                    "pos": datasets.Value("string"),
                    "target_word_location_1": {
                        "char_start": datasets.Value("int32"),
                        "char_end": datasets.Value("int32"),
                    },
                    "target_word_location_2": {
                        "char_start": datasets.Value("int32"),
                        "char_end": datasets.Value("int32"),
                    },
                    "language": datasets.Value("string"),
                    "label": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage="https://pilehvar.github.io/xlwic/",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: DownloadManager):
        downloaded_file = dl_manager.download_and_extract(_DOWNLOAD_URL)
        dataset_root_folder = os.path.join(downloaded_file, "xlwic_datasets")
        # The train split always comes from the training language, while the
        # validation and test splits come from the target language
        # (cross-lingual transfer). The gen_kwargs below are passed to
        # _generate_examples.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "dataset_root": dataset_root_folder,
                    "lang": self.config.training_lang,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dataset_root": dataset_root_folder,
                    "lang": self.config.target_lang,
                    "split": "valid",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dataset_root": dataset_root_folder,
                    "lang": self.config.target_lang,
                    "split": "test",
                },
            ),
        ]
    def _yield_from_lines(self, lines, lang):
        for i, (
            tw,
            pos,
            char_start_1,
            char_end_1,
            char_start_2,
            char_end_2,
            context_1,
            context_2,
            label,
        ) in enumerate(lines):
            _id = f"{lang}_{i}"
            yield _id, {
                "id": _id,
                "target_word": tw,
                "context_1": context_1,
                "context_2": context_2,
                "label": int(label),
                "target_word_location_1": {
                    "char_start": int(char_start_1),
                    "char_end": int(char_end_1),
                },
                "target_word_location_2": {
                    "char_start": int(char_start_2),
                    "char_end": int(char_end_2),
                },
                "pos": pos,
                "language": lang,
            }
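    # Each line handed to _yield_from_lines has nine tab-separated fields, in
    # the order unpacked above:
    #   target_word, pos, char_start_1, char_end_1, char_start_2, char_end_2,
    #   context_1, context_2, label
    # For the test split, the label is appended from the gold file beforehand
    # (see _from_test_files). A hypothetical row, shown with tabs as "\t":
    #   bank\tN\t15\t19\t14\t18\tShe sat on the bank.\tHe robbed the bank.\t0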
    def _from_selfcontained_file(self, dataset_root, lang, split):
        # Train and dev files carry the gold label as their last column.
        ext_lang = _CODE_TO_LANG_ID[lang]
        if lang in _WIKT_LANGS:
            path = os.path.join(
                dataset_root,
                "xlwic_wikt",
                f"{ext_lang}_{lang.lower()}",
                f"{lang.lower()}_{split}.txt",
            )
        elif lang != "EN" and lang in _WN_LANGS:
            path = os.path.join(
                dataset_root,
                "xlwic_wn",
                f"{ext_lang}_{lang.lower()}",
                f"{lang.lower()}_{split}.txt",
            )
        elif lang == "EN" and lang in _WN_LANGS:
            path = os.path.join(
                dataset_root, "wic_english", f"{split}_{lang.lower()}.txt"
            )
        with open(path, encoding="utf-8") as lines:
            all_lines = [line.strip().split("\t") for line in lines]
        yield from self._yield_from_lines(all_lines, lang)
    def _from_test_files(self, dataset_root, lang, split):
        # Test data and gold labels live in separate files. The target
        # languages in _AVAILABLE_PAIRS never include English, so one of the
        # two branches below always fires.
        ext_lang = _CODE_TO_LANG_ID[lang]
        if lang in _WIKT_LANGS:
            path_data = os.path.join(
                dataset_root,
                "xlwic_wikt",
                f"{ext_lang}_{lang.lower()}",
                f"{lang.lower()}_{split}_data.txt",
            )
        elif lang != "EN" and lang in _WN_LANGS:
            path_data = os.path.join(
                dataset_root,
                "xlwic_wn",
                f"{ext_lang}_{lang.lower()}",
                f"{lang.lower()}_{split}_data.txt",
            )
        path_gold = path_data.replace("_data.txt", "_gold.txt")
        with open(path_data, encoding="utf-8") as lines:
            all_lines = [line.strip().split("\t") for line in lines]
        with open(path_gold, encoding="utf-8") as lines:
            all_labels = [line.strip() for line in lines]
        # Re-attach the gold label as the ninth column expected by
        # _yield_from_lines.
        for line, label in zip(all_lines, all_labels):
            line.append(label)
        yield from self._yield_from_lines(all_lines, lang)
    def _generate_examples(self, dataset_root, lang, split, **kwargs):
        if split in {"train", "valid"}:
            yield from self._from_selfcontained_file(dataset_root, lang, split)
        else:
            yield from self._from_test_files(dataset_root, lang, split)
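

# Usage sketch (not part of the loading script). A minimal example, assuming
# this file is saved locally as "xlwic.py"; recent versions of the datasets
# library may additionally require passing trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    # Train on the English WiC data, evaluate on the German dev/test sets;
    # any (training_lang, target_lang) pair from _AVAILABLE_PAIRS works.
    ds = load_dataset("xlwic.py", "xlwic_en_de")
    print(ds)
    print(ds["validation"][0])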