# rosetta_balcanica / rosetta_balcanica.py
# sudarshan85's picture
# TEDHRLR done. Not working
# 1ad7203
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a data loading script for the dataset Rosetta Balcanica."""
#!/usr/bin/env python
# Third-party `datasets` supplies the builder/feature APIs; stdlib `logging`
# is used for script-level diagnostics.
import datasets, logging
from itertools import permutations
# Module-level logger for this loading script, at DEBUG verbosity.
logging.basicConfig(format='[%(name)s] %(levelname)s -> %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Language codes present in the dataset archive.
# NOTE(review): presumably 'ma' = Macedonian and 'sh' = Serbo-Croatian —
# confirm against the dataset repository.
_SUPPORTED_WB_LANGS = ['en', 'ma', 'sh']
# All ordered 2-permutations that have English on either side,
# e.g. ('en', 'ma'), ('ma', 'en') — non-English-to-non-English is excluded.
_VALID_LANGUAGE_PAIRS = [lang_pair for lang_pair in permutations(_SUPPORTED_WB_LANGS, 2) if lang_pair[0] == 'en' or lang_pair[1] == 'en']
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION="""\
@InProceedings{rosetta-balcanica,
title = {Rosetta Balcanica: A Parallel Neural Machine Translation (NMT) Training Dataset for Low-Resource Western Balkans Languages},
author={Edmon Begoli, Maria Mahbub, Sudarshan Srinivasan},
year={2021}
}
"""
_DESCRIPTION="""
Rosetta-Balcanica is a set of evaluation datasets for low resource western Balkan languages manually sourced from articles from OSCE website.
"""
_HOMEPAGE='https://github.com/ebegoli/rosetta-balcanica'
# Tarball containing one 'en-<lang>' directory per pair with train/test files.
_DATA_URL='https://github.com/ebegoli/rosetta-balcanica/raw/main/rosetta_balcanica.tar.gz'
_VERSION=datasets.Version('1.0.0')
class RosettaBalcanicaConfig(datasets.BuilderConfig):
  """BuilderConfig for Rosetta Balcanica, a parallel NMT corpus for
  low-resource Western Balkan languages.

  Args:
    lang_pair: 2-tuple of language codes; one side must be 'en' and the
      pair must appear in `_VALID_LANGUAGE_PAIRS`.
    **kwargs: forwarded to `datasets.BuilderConfig`.

  Raises:
    ValueError: if `lang_pair` is not a supported pair.
  """
  def __init__(self, lang_pair=(None, None), **kwargs):
    # Validate with an explicit raise rather than `assert`, which is
    # silently stripped when Python runs with the -O flag.
    if lang_pair not in _VALID_LANGUAGE_PAIRS:
      raise ValueError(f"Language pair {lang_pair} not supported (yet)")
    name = f'{lang_pair[0]} to {lang_pair[1]}'
    desc = f'Translation dataset from {lang_pair[0]} to {lang_pair[1]}'
    super().__init__(
      name=name,
      description=desc,
      version=_VERSION,
      **kwargs
    )
    self.lang_pair = lang_pair
class RoesettaBalcancia(datasets.GeneratorBasedBuilder):
  """Generator-based builder for the Rosetta Balcanica translation dataset.

  NOTE(review): the class name keeps the original misspelling; renaming it
  would change the script's public loading interface.
  """

  # One config per supported en<->X direction.
  BUILDER_CONFIGS = [
    RosettaBalcanicaConfig(
      lang_pair=lang_pair,
      # Fixed: was `versino=_VERSION` (typo), an unexpected keyword that
      # `datasets.BuilderConfig` rejects.  The config already sets its own
      # version, so no extra kwarg is needed here.
    )
    for lang_pair in _VALID_LANGUAGE_PAIRS
  ]

  def _info(self):
    """Return dataset metadata: translation features, homepage, citation."""
    return datasets.DatasetInfo(
      description=_DESCRIPTION,
      features=datasets.Features(
        {'translation': datasets.features.Translation(languages=self.config.lang_pair)}
      ),
      homepage=_HOMEPAGE,
      supervised_keys=self.config.lang_pair,
      citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Download the archive and declare the train/test splits.

    Archive members live under 'en-<lang>/' regardless of the configured
    direction, so the directory name is derived from the non-English side.
    """
    archive = dl_manager.download(_DATA_URL)
    source, target = self.config.lang_pair
    non_en = source if target == 'en' else target
    data_dir = f'en-{non_en}'
    return [
      datasets.SplitGenerator(
        name=datasets.Split.TRAIN,
        gen_kwargs={
          'source_file': f'{data_dir}/train_{source}.txt',
          'target_file': f'{data_dir}/train_{target}.txt',
          'files': dl_manager.iter_archive(archive),
        }
      ),
      datasets.SplitGenerator(
        name=datasets.Split.TEST,
        gen_kwargs={
          'source_file': f'{data_dir}/test_{source}.txt',
          'target_file': f'{data_dir}/test_{target}.txt',
          'files': dl_manager.iter_archive(archive),
        }
      ),
    ]

  def _generate_examples(self, source_file, target_file, files):
    """Yield (idx, {'translation': {src: ..., tgt: ...}}) examples.

    Args:
      source_file: archive-relative path of the source-language text file.
      target_file: archive-relative path of the target-language text file.
      files: iterator of (path, file-object) pairs from `iter_archive`.

    Raises:
      FileNotFoundError: if either expected file is absent from the archive.
      ValueError: if the source and target line counts differ.
    """
    source_sents, target_sents = None, None
    for path, f in files:
      if path == source_file:
        source_sents = f.read().decode('utf-8').split('\n')
      elif path == target_file:
        target_sents = f.read().decode('utf-8').split('\n')
      if source_sents is not None and target_sents is not None:
        break
    # Explicit missing-file check: the original went straight to len(None),
    # which would raise an opaque TypeError.
    if source_sents is None or target_sents is None:
      raise FileNotFoundError(
        f"Missing file(s) in archive: {source_file} and/or {target_file}"
      )
    # Fixed: the original f-string contained the invalid expression
    # `{len(source_sents) vs len(target_sents)}` — a SyntaxError at import.
    if len(target_sents) != len(source_sents):
      raise ValueError(
        f"Sizes do not match: {len(source_sents)} vs {len(target_sents)} "
        f"for {source_file} vs {target_file}"
      )
    source, target = self.config.lang_pair
    for idx, (l1, l2) in enumerate(zip(source_sents, target_sents)):
      # Fixed: `all(result.values())` tested the (always non-empty) inner
      # dict, so the intended empty-line filter never fired.  Test the
      # sentences themselves (skips e.g. the trailing-newline empty split).
      if l1 and l2:
        yield idx, {'translation': {source: l1, target: l2}}