Languages:
Slovenian
Multilinguality:
monolingual
Language Creators:
found
Annotations Creators:
no-annotation
License:
cc-by-nc-sa-4.0
"""(A publicly available subsample of) a reference corpus of Slovene texts.""" | |
import glob | |
import logging | |
import os | |
import os.path | |
import re | |
import xml.etree.ElementTree as ET | |
from copy import deepcopy | |
from typing import Optional | |
import datasets | |
XML_NAMESPACE = "{http://www.w3.org/XML/1998/namespace}" | |
def namespace(element):
    # https://stackoverflow.com/a/12946675
    m = re.match(r"\{.*\}", element.tag)
    return m.group(0) if m else ""
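
# e.g. for a root tag such as "{http://www.tei-c.org/ns/1.0}text", namespace() returns
# "{http://www.tei-c.org/ns/1.0}" (the namespace URI here is illustrative)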
_CITATION = """\ | |
@misc{ccGigafida, | |
title = {Written corpus {ccGigafida} 1.0}, | |
author = {Logar, Nata{\v s}a and Erjavec, Toma{\v z} and Krek, Simon and Gr{\v c}ar, Miha and Holozan, Peter}, | |
url = {http://hdl.handle.net/11356/1035}, | |
note = {Slovenian language resource repository {CLARIN}.{SI}}, | |
copyright = {Creative Commons - Attribution-{NonCommercial}-{ShareAlike} 4.0 International ({CC} {BY}-{NC}-{SA} 4.0)}, | |
issn = {2820-4042}, | |
year = {2013} | |
} | |
""" | |
_DESCRIPTION = """\ | |
The ccGigafida corpus contains a subsample of the Gigafida corpus. The Gigafida corpus is an extensive collection of | |
Slovene text of various genres, from daily newspapers, magazines, all kinds of books (fiction, non-fiction, textbooks), | |
web pages, transcriptions of parliamentary debates and similar. | |
""" | |
_HOMEPAGE = "http://eng.slovenscina.eu/korpusi/proste-zbirke" | |
_LICENSE = "Creative Commons - Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)" | |
_URLS = { | |
"ccGigafida": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1035/ccGigafidaV1_0.zip" | |
} | |

class CcGigafida(datasets.GeneratorBasedBuilder):
    """(A publicly available subsample of) a reference corpus of Slovene texts."""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="public", version=VERSION,
                               description="Load the publicly available dataset (ccGigafida)."),
        datasets.BuilderConfig(name="private", version=VERSION,
                               description="Load the privately available dataset (Gigafida/Gigafida2) by manually "
                                           "providing the path to the data."),
    ]

    DEFAULT_CONFIG_NAME = "public"
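
    # A minimal usage sketch (the dataset ID "cc_gigafida" is an assumption about where
    # this script is published; the local path is illustrative):
    #   datasets.load_dataset("cc_gigafida", "public")
    #   datasets.load_dataset("cc_gigafida", "private", data_dir="/path/to/Gigafida2")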

    def _info(self):
        features = datasets.Features(
            {
                "id_doc": datasets.Value("string"),
                "doc_title": datasets.Value("string"),
                "authors": datasets.Sequence(datasets.Value("string")),
                "publish_date": datasets.Value("string"),
                "publisher": datasets.Value("string"),
                "genres": datasets.Sequence(datasets.Value("string")),
"doc_tokenized": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))), | |
"doc_lemmas": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))), | |
"doc_msds": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("string")))), | |
"doc_string": datasets.Sequence(datasets.Sequence(datasets.Value("string"))), | |
"id_sents": datasets.Sequence(datasets.Sequence(datasets.Value("string"))) | |
} | |
) | |
return datasets.DatasetInfo( | |
description=_DESCRIPTION, | |
features=features, | |
homepage=_HOMEPAGE, | |
license=_LICENSE, | |
citation=_CITATION, | |
) | |

    def _split_generators(self, dl_manager):
        if self.config.name == "public":
            urls = _URLS["ccGigafida"]
            data_dir = dl_manager.download_and_extract(urls)
            data_dir = os.path.join(data_dir, "ccGigafidaV1_0")
        else:
            # Allow the user to specify the path to the private Gigafida directory:
            # `load_dataset(..., data_dir=...)`
            if dl_manager.manual_dir is None or not os.path.exists(dl_manager.manual_dir):
                logging.warning("data_dir does not point to a valid directory")
            data_dir = dl_manager.manual_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir}
            )
        ]

    def _generate_examples(self, data_dir):
        GENRE_MAPPING = {
            "SSJ.T": "tisk", "SSJ.T.K": "tisk/knjižno", "SSJ.T.K.L": "tisk/knjižno/leposlovno",
            "SSJ.T.K.S": "tisk/knjižno/strokovno", "SSJ.T.P": "tisk/periodično", "SSJ.T.P.C": "tisk/periodično/časopis",
            "SSJ.T.P.R": "tisk/periodično/revija", "SSJ.T.D": "tisk/drugo", "SSJ.I": "internet"
        }
        # Genres are prefixed with "ssj:" in Gigafida 2.0
        for genre, description in deepcopy(GENRE_MAPPING).items():
            GENRE_MAPPING[f"ssj:{genre}"] = description

        # Recursively search for XML files in subdirectories. glob already returns paths
        # rooted at data_dir, so joining them onto data_dir again would break relative paths.
        all_files = sorted(
            file_path for file_path in glob.glob(os.path.join(data_dir, "**", "*.xml"), recursive=True)
            if os.path.isfile(file_path)
        )  # sorted to fix the iteration order
        for _idx_file, file_path in enumerate(all_files):
            curr_doc = ET.parse(file_path)
            root = curr_doc.getroot()
            NAMESPACE = namespace(root)
            id_doc = root.attrib[f"{XML_NAMESPACE}id"]

            # Document metadata
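            # The <bibl> element read below has roughly this shape (illustrative TEI-style snippet):
            #   <bibl><title>...</title><author>...</author><date>1998</date><publisher>...</publisher></bibl>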
            bibl_el = root.find(f".//{NAMESPACE}bibl")
            doc_title = bibl_el.find(f"{NAMESPACE}title").text.strip()
            authors = [author_el.text.strip() for author_el in bibl_el.findall(f"{NAMESPACE}author")]
            publish_date = bibl_el.find(f"{NAMESPACE}date").text.strip()
            publisher = bibl_el.find(f"{NAMESPACE}publisher").text.strip()
            category_tags = root.findall(f".//{NAMESPACE}catRef")
            genres = []
            for _tag in category_tags:
                # In ccGigafida, genres are marked with a "#" prefix
                target = _tag.attrib["target"]
                if target.startswith("#"):
                    target = target[1:]

                # In addition to the genre of the document, there is sometimes a category
                # assigned by the deduplication tool (dedup:nodup) - skip anything unmapped
                mapped_tag = GENRE_MAPPING.get(target, None)
                if mapped_tag is None:
                    continue
                genres.append(mapped_tag)

            # Tokenized and raw string versions - the raw string version preserves spaces
            body_tag = root.find(f".//{NAMESPACE}body")
            tokenized_doc, doc_str = [], []
            doc_sent_ids = []
            doc_msds, doc_lemmas = [], []
            for para_tag in body_tag.findall(f".//{NAMESPACE}p"):
                id_para = para_tag.attrib[f"{XML_NAMESPACE}id"]
                tokenized_para, para_str = [], []
                para_msds, para_lemmas = [], []
                para_sent_ids = []
                for _idx_sent, sent_tag in enumerate(para_tag.findall(f".//{NAMESPACE}s")):
                    # ccGigafida does not have sentence IDs:
                    # construct an ID from the paragraph ID + the sentence's index in the paragraph
                    id_sent = sent_tag.attrib.get(f"{XML_NAMESPACE}id", None)
                    if id_sent is None:
                        id_sent = f"{id_para}.{_idx_sent}"

                    tokenized_sent, str_sent = [], []
                    msd_tags, lemmas = [], []
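                    # A sentence is a flat sequence of token elements, roughly (illustrative markup;
                    # the tag and attribute names are the ones handled below):
                    #   ccGigafida: <w ana="mte:...">beseda</w><S/><c>.</c>
                    #   Gigafida 2: <w msd="..." lemma="...">beseda</w><c> </c><pc>.</pc>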
                    for child_tag in sent_tag:
                        tag_str = child_tag.tag[len(NAMESPACE):]
                        if tag_str not in {"w", "S", "c", "pc"}:
                            logging.warning(f"Found unexpected tag in a sentence: '{tag_str}', skipping it.")
                            continue

                        # Tag for whitespace in ccGigafida
                        if tag_str == "S":
                            str_sent.append(" ")
                        # Tag for:
                        # - single-letter characters in ccGigafida;
                        # - whitespace in Gigafida
                        elif tag_str == "c":
                            str_sent.append(child_tag.text)
                            if child_tag.text != " ":
                                tokenized_sent.append(child_tag.text)
                                msd_tags.append(child_tag.attrib["ana"][len("mte:"):] if "ana" in child_tag.attrib else "")
                                lemmas.append(child_tag.text)
                        # Word or punctuation character
                        else:
                            str_sent.append(child_tag.text)
                            tokenized_sent.append(child_tag.text)
                            msd_tags.append(child_tag.attrib["ana"][len("mte:"):] if "ana" in child_tag.attrib
                                            else child_tag.attrib["msd"])
                            lemmas.append(child_tag.attrib["lemma"] if "lemma" in child_tag.attrib else child_tag.text)
str_sent = "".join(str_sent) | |
tokenized_para.append(tokenized_sent) | |
para_str.append(str_sent) | |
para_sent_ids.append(id_sent) | |
para_msds.append(msd_tags) | |
para_lemmas.append(lemmas) | |
tokenized_doc.append(tokenized_para) | |
doc_str.append(para_str) | |
doc_sent_ids.append(para_sent_ids) | |
doc_msds.append(para_msds) | |
doc_lemmas.append(para_lemmas) | |
            yield _idx_file, {
                "id_doc": id_doc,
                "doc_title": doc_title,
                "authors": authors,
                "publish_date": publish_date,
                "publisher": publisher,
                "genres": genres,
                "doc_tokenized": tokenized_doc,
                "doc_lemmas": doc_lemmas,
                "doc_msds": doc_msds,
                "doc_string": doc_str,
                "id_sents": doc_sent_ids
            }
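
# A yielded example has roughly this shape (all values below are illustrative):
# {
#     "id_doc": "F0000001",
#     "doc_title": "...",
#     "authors": ["..."],
#     "publish_date": "1998",
#     "publisher": "...",
#     "genres": ["tisk/periodično/časopis"],
#     "doc_tokenized": [[["Prva", "poved", "."]]],  # paragraphs -> sentences -> tokens
#     "doc_lemmas": [[["prvi", "poved", "."]]],
#     "doc_msds": [[["...", "...", "..."]]],
#     "doc_string": [["Prva poved."]],              # paragraphs -> sentences
#     "id_sents": [["F0000001.1.0"]]
# }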