# Source: cjvt/sloie — sloie.py (Hugging Face Hub dataset loading script)
# Author: Matej Klemen — "Add initial version of SloIE parsing script" (commit 7524c13)
""" SloIE is a manually labelled dataset of Slovene idiomatic expressions. """
import os
import datasets
# BibTeX entry for the MICE paper in which the dataset was introduced.
_CITATION = """\
@article{skvorc2022mice,
title = {MICE: Mining Idioms with Contextual Embeddings},
journal = {Knowledge-Based Systems},
volume = {235},
pages = {107606},
year = {2022},
issn = {0950-7051},
doi = {https://doi.org/10.1016/j.knosys.2021.107606},
url = {https://www.sciencedirect.com/science/article/pii/S0950705121008686},
author = {{\v S}kvorc, Tadej and Gantar, Polona and Robnik-{\v S}ikonja, Marko},
}
"""
# Human-readable summary shown on the dataset card.
_DESCRIPTION = """\
SloIE is a manually labelled dataset of Slovene idiomatic expressions.
It contains 29,400 sentences with 75 different expressions that can occur with either a literal or an idiomatic meaning,
with appropriate manual annotations for each token. The idiomatic expressions were selected from the Slovene Lexical
Database (http://hdl.handle.net/11356/1030). Only expressions that can occur with both a literal and an idiomatic
meaning were selected. The sentences were extracted from the Gigafida corpus.
"""
# NOTE(review): this handle is the Slovene Lexical Database (see _DESCRIPTION),
# while the data itself is downloaded from handle 11356/1335 below — confirm
# which landing page is intended as the dataset homepage.
_HOMEPAGE = "http://hdl.handle.net/11356/1030"
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
# Direct download link for the zipped raw data on the CLARIN.SI repository.
_URLS = {
"sloie": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1335/SloIE.zip"
}
class SloIE(datasets.GeneratorBasedBuilder):
    """SloIE: a manually labelled dataset of Slovene idiomatic expressions.

    Each example is one sentence containing a (potentially) idiomatic
    expression, with a per-token annotation of whether the token is used
    idiomatically or literally.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the schema of a single example."""
        features = datasets.Features(
            {
                # Raw sentence text (everything after the "#" marker line).
                "sentence": datasets.Value("string"),
                # The annotated expression occurring in this sentence.
                "expression": datasets.Value("string"),
                # Integer positions of the tokens, as given in the source file.
                "word_order": datasets.Sequence(datasets.Value("int32")),
                # Sentence split into tokens, aligned 1:1 with `is_idiom`.
                "sentence_words": datasets.Sequence(datasets.Value("string")),
                # Per-token idiomaticity labels, kept as the raw strings from the file.
                "is_idiom": datasets.Sequence(datasets.Value("string"))
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; the dataset ships as a single TRAIN split."""
        urls = _URLS["sloie"]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "SloIE.txt")}
            )
        ]

    def _generate_examples(self, file_path):
        """Parse SloIE.txt and yield ``(index, example)`` pairs.

        Expected file layout (inferred from the parsing logic — confirm
        against the raw data):
            #<sentence text>
            <space-separated integer token positions>
            <token>\\t<is_idiomatic>\\t<expression>   (one line per token)
        A new sentence starts at the next line that begins with "#" and
        contains no tab-separated fields.

        Raises:
            ValueError: if a sentence block does not start with a "#" marker line.
        """
        idx_instance = 0
        with open(file_path, "r", encoding="utf-8") as f:
            line = f.readline().strip()
            while line:
                # Validate with an explicit raise rather than `assert`: asserts
                # are stripped under `python -O`, which would let malformed
                # input slip through and fail confusingly further down.
                if not line.startswith("#"):
                    raise ValueError(
                        f"Expected a sentence marker line starting with '#', got: {line!r}"
                    )
                sent = line[1:]  # Remove initial "#"
                word_order = list(map(int, f.readline().strip().split(" ")))
                # `expression` keeps the value from the last token line; every
                # token line of a sentence carries the same expression string.
                expression = ""
                sentence_words, idiomaticity = [], []
                line = f.readline().strip()
                while line:
                    word, is_idiomatic_str, expression = line.split("\t")
                    sentence_words.append(word)
                    idiomaticity.append(is_idiomatic_str)
                    line = f.readline().strip()
                    # Start of the next sentence. Note that "#" may also occur
                    # as an annotated token, hence the extra no-tabs check.
                    if line.startswith("#") and len(line.split("\t")) == 1:
                        break
                yield idx_instance, {
                    "sentence": sent,
                    "expression": expression,
                    "word_order": word_order,
                    "sentence_words": sentence_words,
                    "is_idiom": idiomaticity
                }
                idx_instance += 1