"""CoSimLex is a resource for evaluating graded word similarity in context."""
import csv
import datasets
_CITATION = """\
@inproceedings{armendariz-etal-2020-semeval,
title = "{SemEval-2020} {T}ask 3: Graded Word Similarity in Context ({GWSC})",
author = "Armendariz, Carlos S. and
Purver, Matthew and
Pollak, Senja and
Ljube{\v{s}}i{\'{c}}, Nikola and
Ul{\v{c}}ar, Matej and
Robnik-{\v{S}}ikonja, Marko and
Vuli{\'{c}}, Ivan and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 14th International Workshop on Semantic Evaluation",
year = "2020",
address="Online"
}
"""
_DESCRIPTION = """\
CoSimLex contains human similarity ratings for pairs of words in context. Annotators were shown contexts that
contain both words of a pair, and each pair was rated in two different contexts. The word pairs were sourced
from the English, Croatian, Finnish and Slovenian versions of the original SimLex-999 dataset.
"""
_HOMEPAGE = "http://hdl.handle.net/11356/1308"
_LICENSE = "GNU General Public Licence, version 3"
_URLS = {
"en": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1308/cosimlex_en.csv",
"fi": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1308/cosimlex_fi.csv",
"hr": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1308/cosimlex_hr.csv",
"sl": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1308/cosimlex_sl.csv"
}
class CoSimLex(datasets.GeneratorBasedBuilder):
"""CoSimLex is a resource for evaluating graded word similarity in context."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="en", version=VERSION, description="The English subset."),
datasets.BuilderConfig(name="fi", version=VERSION, description="The Finnish subset."),
datasets.BuilderConfig(name="hr", version=VERSION, description="The Croatian subset."),
datasets.BuilderConfig(name="sl", version=VERSION, description="The Slovenian subset."),
]
def _info(self):
features = datasets.Features(
{
"word1": datasets.Value("string"), "word2": datasets.Value("string"),
"context1": datasets.Value("string"), "context2": datasets.Value("string"),
"sim1": datasets.Value("float32"), "sim2": datasets.Value("float32"),
"stdev1": datasets.Value("float32"), "stdev2": datasets.Value("float32"),
"pvalue": datasets.Value("float32"),
"word1_context1": datasets.Value("string"), "word2_context1": datasets.Value("string"),
"word1_context2": datasets.Value("string"), "word2_context2": datasets.Value("string")
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        # Each language subset is distributed as a single file, so it is exposed as one "train" split.
        url = _URLS[self.config.name]
        file_path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": file_path},
            )
        ]
    def _generate_examples(self, file_path):
        with open(file_path, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar='"')
            # The first row holds the column names, which match the feature names defined in _info().
            header = next(reader)
            for i, row in enumerate(reader):
                yield i, dict(zip(header, row))
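
# Minimal usage sketch (assumption: this loading script is saved locally as "cosimlex.py";
# the path/name is illustrative and not defined by this file):
#
#     from datasets import load_dataset
#     ds = load_dataset("cosimlex.py", "en", split="train")
#     print(ds[0]["word1"], ds[0]["word2"], ds[0]["sim1"], ds[0]["sim2"])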