# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""EMEA-V3 : European parallel translation corpus from the European Medicines Agency"""
import datasets
import pandas as pd
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@inproceedings{tiedemann-2012-parallel,
    title = {Parallel Data, Tools and Interfaces in OPUS},
    author = {Tiedemann, Jörg},
    booktitle = {Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)},
    month = may,
    year = {2012},
    address = {Istanbul, Turkey},
    publisher = {European Language Resources Association (ELRA)},
    url = {http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf},
    pages = {2214--2218},
    abstract = {This paper presents the current status of OPUS, a growing language resource of parallel corpora and related tools. The focus in OPUS is to provide freely available data sets in various formats together with basic annotation to be useful for applications in computational linguistics, translation studies and cross-linguistic corpus studies. In this paper, we report about new data sets and their features, additional annotation tools and models provided from the website and essential interfaces and on-line services included in the project.},
}
"""
_LANGUAGE_PAIRS = ["bg-cs","bg-da","bg-de","bg-el","bg-en","bg-es","bg-et","bg-fi","bg-fr","bg-hu","bg-it","bg-lt","bg-lv","bg-mt","bg-nl","bg-pl","bg-pt","bg-ro","bg-sk","bg-sl","bg-sv","cs-da","cs-de","cs-el","cs-en","cs-es","cs-et","cs-fi","cs-fr","cs-hu","cs-it","cs-lt","cs-lv","cs-mt","cs-nl","cs-pl","cs-pt","cs-ro","cs-sk","cs-sl","cs-sv","da-de","da-el","da-en","da-es","da-et","da-fi","da-fr","da-hu","da-it","da-lt","da-lv","da-mt","da-nl","da-pl","da-pt","da-ro","da-sk","da-sl","da-sv","de-el","de-en","de-es","de-et","de-fi","de-fr","de-hu","de-it","de-lt","de-lv","de-mt","de-nl","de-pl","de-pt","de-ro","de-sk","de-sl","de-sv","el-en","el-es","el-et","el-fi","el-fr","el-hu","el-it","el-lt","el-lv","el-mt","el-nl","el-pl","el-pt","el-ro","el-sk","el-sl","el-sv","en-es","en-et","en-fi","en-fr","en-hu","en-it","en-lt","en-lv","en-mt","en-nl","en-pl","en-pt","en-ro","en-sk","en-sl","en-sv","es-et","es-fi","es-fr","es-hu","es-it","es-lt","es-lv","es-mt","es-nl","es-pl","es-pt","es-ro","es-sk","es-sl","es-sv","et-fi","et-fr","et-hu","et-it","et-lt","et-lv","et-mt","et-nl","et-pl","et-pt","et-ro","et-sk","et-sl","et-sv","fi-fr","fi-hu","fi-it","fi-lt","fi-lv","fi-mt","fi-nl","fi-pl","fi-pt","fi-ro","fi-sk","fi-sl","fi-sv","fr-hu","fr-it","fr-lt","fr-lv","fr-mt","fr-nl","fr-pl","fr-pt","fr-ro","fr-sk","fr-sl","fr-sv","hu-it","hu-lt","hu-lv","hu-mt","hu-nl","hu-pl","hu-pt","hu-ro","hu-sk","hu-sl","hu-sv","it-lt","it-lv","it-mt","it-nl","it-pl","it-pt","it-ro","it-sk","it-sl","it-sv","lt-lv","lt-mt","lt-nl","lt-pl","lt-pt","lt-ro","lt-sk","lt-sl","lt-sv","lv-mt","lv-nl","lv-pl","lv-pt","lv-ro","lv-sk","lv-sl","lv-sv","mt-nl","mt-pl","mt-pt","mt-ro","mt-sk","mt-sl","mt-sv","nl-pl","nl-pt","nl-ro","nl-sk","nl-sl","nl-sv","pl-pt","pl-ro","pl-sk","pl-sl","pl-sv","pt-ro","pt-sk","pt-sl","pt-sv","ro-sk","ro-sl","ro-sv","sk-sl","sk-sv","sl-sv"]
_LICENSE = """
This work is licensed under a <a rel="license" href="https://creativecommons.org/licenses/by/4.0/deed.fr">Attribution 4.0 International (CC BY 4.0) License</a>.
"""
_DESCRIPTION = "EMEA-V3: European parallel translation corpus from the European Medicines Agency."
_URL = "https://huggingface.co/datasets/qanastek/EMEA-V3/resolve/main/csv/{}.csv.gz"
# _URL = "https://huggingface.co/datasets/qanastek/EMEA-V3/raw/main/csv/{}.csv.gz"
class EMEA_V3(datasets.GeneratorBasedBuilder):
"""EMEA-V3 dataset."""
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("3.0.0"), description="The EMEA-V3 corpus")
        for name in _LANGUAGE_PAIRS
    ]
DEFAULT_CONFIG_NAME = "bg-cs"
def _info(self):
src, target = self.config.name.split("-")
pair = (src, target)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{"translation": datasets.features.Translation(languages=pair)}
),
supervised_keys=(src, target),
homepage="https://github.com/qanastek/EMEA-V3/",
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
# Download the CSV
data_dir = dl_manager.download(_URL.format(self.config.name))
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": data_dir,
"split": "train",
}
),
]
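    # The downloaded CSV is expected to hold one aligned sentence pair per row,
    # with the columns read below: `lang` (the "src-tgt" pair, e.g. "bg-cs"),
    # `source_text` and `target_text`.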
def _generate_examples(self, filepath, split):
logger.info("⏳ Generating examples from = %s", filepath)
key_ = 0
df = pd.read_csv(filepath, compression='gzip', header=0, sep=',')
        for _, row in df.iterrows():
            # Split the language pair (e.g. "bg-cs") into its source and target codes
            src, target = str(row['lang']).split("-")
yield key_, {
"translation": {
src: str(row['source_text']).strip(),
target: str(row['target_text']).strip(),
},
}
key_ += 1
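# A minimal usage sketch, not part of the loading script itself: it assumes this file
# is served as the `qanastek/EMEA-V3` dataset on the Hugging Face Hub (as the URL above
# suggests). Any pair from _LANGUAGE_PAIRS can be passed as the configuration name;
# recent versions of `datasets` may additionally require `trust_remote_code=True`.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the English-French pair; the script only produces a "train" split.
    emea = load_dataset("qanastek/EMEA-V3", "en-fr", split="train")

    # Each example is a single `translation` dict keyed by the two language codes,
    # e.g. {"translation": {"en": "...", "fr": "..."}}.
    print(emea[0])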