import datasets


_DESCRIPTION = """\
This dataset consists of monolingual (Sranantongo) and parallel (Sranantongo - Dutch) data.
"""

_CITATION = """\
@article{zwennicker2022towards,
  title={Towards a general purpose machine translation system for Sranantongo},
  author={Zwennicker, Just and Stap, David},
  journal={arXiv preprint arXiv:2212.06383},
  year={2022}
}
"""

_DATA_URL = "data/sranantongo.tar"

_LANGUAGE2FILES = {
    "srn": {"train": "srn_mono_SIL.csv", "validation": None, "test": None},
    "srn-nl_jw": {split: f"srn-nl_JW_{split}.csv" for split in ["train", "validation", "test"]},
    "srn-nl_other": {split: f"srn-nl_other_{split}.csv" for split in ["train", "validation", "test"]},
}
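# The file names above are matched verbatim against archive member paths in
# _generate_examples, so the CSVs are assumed to sit at the root of the
# downloaded archive; None marks splits a configuration does not provide.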


class SranantongoConfig(datasets.BuilderConfig):
    """BuilderConfig for the Sranantongo dataset."""

    def __init__(self, name: str, **kwargs):
        # Parallel configurations are prefixed with "srn-nl"; everything else is monolingual.
        description = (
            "Parallel sentences in `Sranantongo` and `Dutch`."
            if "srn-nl" in name
            else "Monolingual sentences in `Sranantongo`."
        )
        super(SranantongoConfig, self).__init__(name=name, description=description, **kwargs)


class Sranantongo(datasets.GeneratorBasedBuilder):
    """Sranantongo data from https://arxiv.org/abs/2212.06383"""

    BUILDER_CONFIGS = [
        SranantongoConfig(name=name, version=datasets.Version("1.0.0", ""))
        for name in _LANGUAGE2FILES.keys()
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "srn": datasets.Value("string"),
                    # Parallel configurations additionally carry the Dutch side.
                    **({"nl": datasets.Value("string")} if "srn-nl" in self.config.name else {}),
                }
            ),
            homepage="https://arxiv.org/abs/2212.06383",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_DATA_URL)

        # The monolingual configuration only ships a training file; the parallel
        # configurations also provide validation and test splits.
        split_names = ["train"]
        if "srn-nl" in self.config.name:
            split_names += ["validation", "test"]

        return [
            datasets.SplitGenerator(
                name=split_name,
                # Re-open the archive for every split: iter_archive returns a
                # one-shot iterator, so a single shared iterator would be
                # exhausted after the first split had been generated.
                gen_kwargs={"files": dl_manager.iter_archive(archive_path), "split": split_name},
            )
            for split_name in split_names
        ]

    def _generate_examples(self, split, files):
        """Returns examples as raw text."""
        if "srn-nl" in self.config.name:
            return self._generate_examples_parallel(split=split, files=files)
        else:
            return self._generate_examples_mono(split=split, files=files)

    def _generate_examples_mono(self, split, files):
        for path, file in files:
            if path == _LANGUAGE2FILES[self.config.name][split]:
                data = file.read().decode("utf-8").split("\n")
                for idx, sentence in enumerate(data):
                    # Skip empty lines, e.g. the trailing newline at the end of the file.
                    if not sentence.strip():
                        continue
                    yield idx, {"srn": sentence}

    def _generate_examples_parallel(self, split, files):
        for path, file in files:
            if path == _LANGUAGE2FILES[self.config.name][split]:
                data = file.read().decode("utf-8").split("\n")
                for idx, sentence in enumerate(data):
                    # Skip empty lines; each remaining line is expected to be "<Dutch>|<Sranantongo>".
                    if not sentence.strip():
                        continue
                    nl, srn = sentence.split("|")
                    yield idx, {"nl": nl, "srn": srn}
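

# Minimal usage sketch (not part of the loading script itself): it assumes this
# file is saved locally as "sranantongo.py" next to the data/ directory that
# _DATA_URL points at, and that the installed `datasets` version still supports
# script-based loading (newer releases may also require trust_remote_code=True).
# The __main__ guard keeps the example from running when the datasets library
# imports this module.
if __name__ == "__main__":
    mono = datasets.load_dataset("sranantongo.py", "srn", split="train")
    parallel = datasets.load_dataset("sranantongo.py", "srn-nl_jw")
    print(mono[0])
    print(parallel["validation"][0])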