"""SloWNet is a semantic lexicon of Slovene."""

import os
import xml.etree.ElementTree as ET

import datasets


_CITATION = r"""@inproceedings{fiser2012slownet,
title={sloWNet 3.0: development, extension and cleaning},
author={Fi{\v{s}}er, Darja and Novak, Jernej and Erjavec, Toma{\v{z}}},
booktitle={Proceedings of 6th International Global Wordnet Conference (GWC 2012)},
pages={113--117},
year={2012}
}
"""

_DESCRIPTION = """\
sloWNet is the Slovene WordNet developed in the expand approach: it contains the complete Princeton WordNet 3.0 and
over 70 000 Slovene literals. These literals have been added automatically using different types of existing resources,
such as bilingual dictionaries, parallel corpora and Wikipedia. 33 000 literals have been subsequently hand-validated.
"""

_HOMEPAGE = "http://hdl.handle.net/11356/1026"

_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLS = {
    "slownet": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1026/slownet.zip"
}

XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}"
UNK_INT = -1
UNK_STR = "N/A"
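# Note: ElementTree exposes namespaced attributes such as xml:lang under their fully
# qualified name, so language attributes below are read as
# element.attrib[f"{XML_NAMESPACE}lang"]. UNK_INT and UNK_STR are sentinel values
# used when an optional element (e.g. BCS, DEF or CLUSTER) is missing from a synset.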


class SloWNet(datasets.GeneratorBasedBuilder):
    """SloWNet is a semantic lexicon of Slovene."""

    VERSION = datasets.Version("3.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "pos": datasets.Value("string"),
                "bcs": datasets.Value("int32"),
                "en_synonyms": {
                    "words": datasets.Sequence(datasets.Value("string")),
                    "senses": datasets.Sequence(datasets.Value("int32")),
                    "pwnids": datasets.Sequence(datasets.Value("string"))
                },
                "sl_synonyms": {
                    "words": datasets.Sequence(datasets.Value("string")),
                    "is_validated": datasets.Sequence(datasets.Value("bool"))
                },
                "en_def": datasets.Value("string"),
                "sl_def": datasets.Value("string"),
                "en_usages": datasets.Sequence(datasets.Value("string")),
                "sl_usages": datasets.Sequence(datasets.Value("string")),
                "ilrs": {
                    "types": datasets.Sequence(datasets.Value("string")),
                    "id_synsets": datasets.Sequence(datasets.Value("string"))
                },
                "semeval07_cluster": datasets.Value("string"),
                "domains": datasets.Sequence(datasets.Value("string"))
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        urls = _URLS["slownet"]
        data_dir = dl_manager.download_and_extract(urls)
        # The entire lexicon is exposed as a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "slownet-2015-05-07.xml")}
            )
        ]

    def _generate_examples(self, file_path):
        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()

        for idx_synset, synset in enumerate(root.iterfind("SYNSET")):
            id_synset = synset.find("ID").text.strip()
            pos_tag = synset.find("POS").text.strip()

            # The base concept set (BCS) marker is optional.
            base_concept_set = synset.find("BCS")
            if base_concept_set is not None:
                base_concept_set = int(base_concept_set.text.strip())
            else:
                base_concept_set = UNK_INT

            en_synonyms = {"words": [], "senses": [], "pwnids": []}
            sl_synonyms = {"words": [], "is_validated": []}

            for curr_synonyms in synset.findall("SYNONYM"):
                lang = curr_synonyms.attrib[f"{XML_NAMESPACE}lang"]

                if lang == "en":
                    for syn in curr_synonyms.iterfind("LITERAL"):
                        en_synonyms["words"].append(syn.text.strip())
                        en_synonyms["senses"].append(int(syn.attrib["sense"]))
                        en_synonyms["pwnids"].append(syn.attrib["pwnid"])
                else:
                    for syn in curr_synonyms.iterfind("LITERAL"):
                        sl_synonyms["words"].append(syn.text.strip())
                        # A Slovene literal counts as validated only if its `lnote` attribute
                        # equals "manual"; a missing `lnote` defaults to automatic.
                        sl_synonyms["is_validated"].append(syn.attrib.get("lnote", "auto") == "manual")

            sl_def, en_def = UNK_STR, UNK_STR
            for curr_def in synset.findall("DEF"):
                lang = curr_def.attrib[f"{XML_NAMESPACE}lang"]
                if lang == "en":
                    en_def = curr_def.text.strip()
                else:
                    sl_def = curr_def.text.strip()

            en_usages, sl_usages = [], []
            for curr_usage in synset.findall("USAGE"):
                lang = curr_usage.attrib[f"{XML_NAMESPACE}lang"]
                target_list = en_usages if lang == "en" else sl_usages
                target_list.append(curr_usage.text.strip())

            ilrs = {"types": [], "id_synsets": []}
            for curr_ilr in synset.findall("ILR"):
                ilrs["types"].append(curr_ilr.attrib["type"])
                ilrs["id_synsets"].append(curr_ilr.text.strip())

            semeval07_cluster = synset.find("CLUSTER")
            if semeval07_cluster is not None:
                semeval07_cluster = semeval07_cluster.text.strip()
            else:
                semeval07_cluster = UNK_STR

            domains = [el.text.strip() for el in synset.findall("DOMAIN")]

            yield idx_synset, {
                "id": id_synset,
                "pos": pos_tag,
                "bcs": base_concept_set,
                "en_synonyms": en_synonyms,
                "sl_synonyms": sl_synonyms,
                "en_def": en_def,
                "sl_def": sl_def,
                "en_usages": en_usages,
                "sl_usages": sl_usages,
                "ilrs": ilrs,
                "semeval07_cluster": semeval07_cluster,
                "domains": domains
            }
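

# Minimal usage sketch (an assumption for illustration, not part of the loading script
# itself): when this file is saved locally, e.g. as slownet.py, it can be loaded through
# the `datasets` library. Depending on your `datasets` version, loading a local script
# may require passing trust_remote_code=True or may no longer be supported.
if __name__ == "__main__":
    dset = datasets.load_dataset(__file__, split="train")  # hypothetical local invocation
    print(dset[0])  # inspect the first synset record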