# ECDC / ECDC.py — HuggingFace Hub dataset script (uploaded by qanastek, commit 875004b)
# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""ECDC : European parallel translation corpus from the European Medicines Agency"""
import gzip
import datasets
import pandas as pd
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@article{10.1007/s10579-014-9277-0,
author = {Steinberger, Ralf and Ebrahim, Mohamed and Poulis, Alexandros and Carrasco-Benitez, Manuel and Schl\"{u}ter, Patrick and Przybyszewski, Marek and Gilbro, Signe},
title = {An Overview of the European Union's Highly Multilingual Parallel Corpora},
year = {2014},
issue_date = {December 2014},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
volume = {48},
number = {4},
issn = {1574-020X},
url = {https://doi.org/10.1007/s10579-014-9277-0},
doi = {10.1007/s10579-014-9277-0},
abstract = {Starting in 2006, the European Commission's Joint Research Centre and other European Union organisations have made available a number of large-scale highly-multilingual parallel language resources. In this article, we give a comparative overview of these resources and we explain the specific nature of each of them. This article provides answers to a number of question, including: What are these linguistic resources? What is the difference between them? Why were they originally created and why was the data released publicly? What can they be used for and what are the limitations of their usability? What are the text types, subject domains and languages covered? How to avoid overlapping document sets? How do they compare regarding the formatting and the translation alignment? What are their usage conditions? What other types of multilingual linguistic resources does the EU have? This article thus aims to clarify what the similarities and differences between the various resources are and what they can be used for. It will also serve as a reference publication for those resources, for which a more detailed description has been lacking so far (EAC-TM, ECDC-TM and DGT-Acquis).},
journal = {Lang. Resour. Eval.},
month = {dec},
pages = {679–707},
numpages = {29},
keywords = {DCEP, EAC-TM, EuroVoc, JRC EuroVoc Indexer JEX, Parallel corpora, DGT-TM, Eur-Lex, Highly multilingual, Linguistic resources, DGT-Acquis, European Union, ECDC-TM, JRC-Acquis, Translation memory}
}
"""
_LANGUAGE_PAIRS = ["en-sv","en-pl","en-hu","en-lt","en-sk","en-ga","en-fr","en-cs","en-el","en-it","en-lv","en-da","en-nl","en-bg","en-is","en-ro","en-no","en-pt","en-es","en-et","en-mt","en-sl","en-fi","en-de"]
_LICENSE = """
This work is licensed under <a rel="license" href="https://wt-public.emm4u.eu/Resources/ECDC-TM/2012_10_Terms-of-Use_ECDC-TM.pdf">Copyright (c) EU/ECDC, 2022</a>.
"""
_DESCRIPTION = "No description"
_URL = "https://huggingface.co/datasets/qanastek/ECDC/resolve/main/csv/ECDC.csv.gz"
_DESCRIPTION = "No description"
class ECDC(datasets.GeneratorBasedBuilder):
    """ECDC-TM parallel translation corpus.

    One builder config per language pair in ``_LANGUAGE_PAIRS`` (e.g. "en-it").
    All pairs live in a single gzipped CSV; examples for the selected pair are
    filtered out of it at generation time.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description="The ECDC corpora",
        )
        for name in _LANGUAGE_PAIRS
    ]
    DEFAULT_CONFIG_NAME = "en-it"

    def _info(self):
        """Return the DatasetInfo for the selected language-pair config."""
        src, target = self.config.name.split("-")
        pair = (src, target)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "doc_id": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "translation": datasets.Translation(languages=pair),
                },
            ),
            supervised_keys=(src, target),
            homepage="https://github.com/qanastek/ECDC/",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the shared gzipped CSV and expose a single train split."""
        # NOTE(review): _URL has no "{}" placeholder, so .format() is a no-op;
        # kept so the call shape stays identical for every config.
        data_dir = dl_manager.download(_URL.format(self.config.name))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs for rows matching the selected pair.

        Each example carries the original document key, the language-pair tag,
        and a {src: text, target: text} translation dict.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        # Let pandas decompress: the original wrapped the file in a GzipFile
        # object that was never explicitly closed.
        df = pd.read_csv(filepath, compression="gzip")
        matching = df.loc[df["lang"] == self.config.name]
        for key, (_, row) in enumerate(matching.iterrows()):
            # The "lang" column stores the pair as "src-target".
            src, target = str(row["lang"]).split("-")
            yield key, {
                "doc_id": row["key"],
                "lang": row["lang"],
                "translation": {
                    src: str(row["source_text"]).strip(),
                    target: str(row["target_text"]).strip(),
                },
            }