"""A collocations dictionary of modern Slovene"""

import os
import xml.etree.ElementTree as ET

import datasets


_CITATION = r"""@inproceedings{kosem2018collocations,
    title={Collocations dictionary of modern Slovene},
    author={Kosem, Iztok and Krek, Simon and Gantar, Polona and Arhar Holdt, {\v{S}}pela and {\v{C}}ibej, Jaka and Laskowski, Cyprian},
    booktitle={Proceedings of the XVIII EURALEX International Congress: Lexicography in Global Contexts},
    pages={989--997},
    year={2018},
    organization={Znanstvena zalo{\v{z}}ba Filozofske fakultete Univerze v Ljubljani}
}"""

_DESCRIPTION = """\
The database of the Collocations Dictionary of Modern Slovene 1.0 contains collocations that were automatically
extracted from the Gigafida 1.0 corpus and then postprocessed.
"""

_HOMEPAGE = "https://www.cjvt.si/kssj/en/"

_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLS = {
    "slo_collocations": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1250/KSSS.zip"
}
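
# A minimal usage sketch, assuming this script is saved locally as, e.g.,
# "slo_collocations.py" (the filename is illustrative, and the exact call may
# vary with the installed version of the datasets library):
#
#     import datasets
#
#     ds = datasets.load_dataset("slo_collocations.py", split="train")
#     print(ds[0]["collocation"], ds[0]["gramrel"])

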
class SloCollocations(datasets.GeneratorBasedBuilder):
    """A collocations dictionary of modern Slovene"""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features({
            "collocation": datasets.Value("string"),
            "cluster": datasets.Value("uint32"),
            "words": datasets.Sequence(datasets.Value("string")),
            "sloleks_ids": datasets.Sequence(datasets.Value("string")),
            "gramrel": datasets.Value("string"),
            "sense": datasets.Value("uint32"),
            "id_lex_unit": datasets.Value("string"),
            "lex_unit": datasets.Value("string"),
            "lex_unit_category": datasets.Value("string")
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["slo_collocations"]
        data_dir = dl_manager.download_and_extract(urls)
        # The archive is expected to extract into a single "KSSS" directory,
        # which holds one subdirectory per part of speech of the headword.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": os.path.join(data_dir, "KSSS")}
            )
        ]

    def _generate_examples(self, data_dir):
        # Slovene directory names mapped to English part-of-speech labels.
        CATEGORY_MAPPING = {"samostalnik": "noun", "glagol": "verb", "pridevnik": "adjective", "prislov": "adverb"}

        # Collect the entry files: one XML file per headword, grouped into a
        # subdirectory per part of speech.
        all_file_paths = []
        for cat_slo in ["glagol", "pridevnik", "prislov", "samostalnik"]:
            _curr_dir = os.path.join(data_dir, cat_slo)
            for _fname in os.listdir(_curr_dir):
                if os.path.isfile(os.path.join(_curr_dir, _fname)) and _fname.endswith(".xml"):
                    all_file_paths.append(os.path.join(_curr_dir, _fname))

        idx_ex = 0
        for file_path in all_file_paths:
            curr_doc = ET.parse(file_path)
            root = curr_doc.getroot()

            # Header metadata: the lexical unit (headword), its ID and its category.
            header_tag = root.find("header")
            lu_tag = header_tag.find("lexical_unit")

            id_lu = lu_tag.attrib["id"]
            lu_form = lu_tag.text
            category = CATEGORY_MAPPING[header_tag.find("category").text.strip()]

            # Each sense groups grammatical relations, which in turn group collocations.
            for idx_sense, sense_tag in enumerate(root.findall(".//sense")):
                for gramrel_tag in sense_tag.findall(".//gramrel"):
                    gramrel = gramrel_tag.attrib["name"]
                    for colloc_tag in gramrel_tag.findall(".//collocation"):
                        colloc_form = colloc_tag.attrib["form"]
                        colloc_cluster = colloc_tag.attrib["cluster"]

                        # Component words of the collocation and their Sloleks IDs.
                        colloc_words, sloleks_ids = [], []
                        for comp_tag in colloc_tag.findall("comp"):
                            colloc_words.append(comp_tag.text)
                            sloleks_ids.append(comp_tag.attrib["sloleks"])

                        yield idx_ex, {
                            "collocation": colloc_form,
                            "cluster": int(colloc_cluster),
                            "words": colloc_words,
                            "sloleks_ids": sloleks_ids,
                            "gramrel": gramrel,
                            "sense": idx_sense,
                            "id_lex_unit": id_lu,
                            "lex_unit": lu_form,
                            "lex_unit_category": category,
                        }
                        idx_ex += 1
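

# For reference, a sketch of the layout each entry XML file is expected to follow,
# as implied by the parsing logic in _generate_examples (element and attribute
# names are taken from the code above; the root tag name and all values are
# placeholders, and the ".//" lookups mean that <sense>, <gramrel> and
# <collocation> may sit at deeper nesting levels than shown):
#
#     <entry>
#         <header>
#             <lexical_unit id="...">...</lexical_unit>
#             <category>samostalnik</category>
#         </header>
#         ...
#         <sense>
#             <gramrel name="...">
#                 <collocation form="..." cluster="...">
#                     <comp sloleks="...">...</comp>
#                     <comp sloleks="...">...</comp>
#                 </collocation>
#             </gramrel>
#         </sense>
#         ...
#     </entry>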
|