# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py

"""WMT'16 Biomedical Translation Task - PubMed parallel datasets"""

import gzip

import datasets
import pandas as pd

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bojar-etal-2016-findings,
    title = "Findings of the 2016 Conference on Machine Translation",
    author = "Bojar, Ondrej and Chatterjee, Rajen and Federmann, Christian and
      Graham, Yvette and Haddow, Barry and Huck, Matthias and
      Jimeno Yepes, Antonio and Koehn, Philipp and Logacheva, Varvara and
      Monz, Christof and Negri, Matteo and Neveol, Aurelie and
      Neves, Mariana and Popel, Martin and Post, Matt and Rubino, Raphael and
      Scarton, Carolina and Specia, Lucia and Turchi, Marco and
      Verspoor, Karin and Zampieri, Marcos",
    booktitle = "Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W16-2301",
    doi = "10.18653/v1/W16-2301",
    pages = "131--198",
}
"""

_LANGUAGE_PAIRS = ["en-pt", "en-es", "en-fr"]
_LANGUAGE_PAIRS_TUPLES = [("en", "pt"), ("en", "es"), ("en", "fr")]

_LICENSE = """\
This work is licensed under an Attribution 4.0 International (CC BY 4.0) License.
"""

_DESCRIPTION = """\
WMT'16 Biomedical Translation Task - PubMed parallel datasets
http://www.statmt.org/wmt16/biomedical-translation-task.html
"""

_URL = "https://huggingface.co/datasets/qanastek/WMT-16-PubMed/resolve/main/WMT16.csv.gz"


class WMT_16_CONFIG(datasets.BuilderConfig):
    """BuilderConfig for a single language pair, e.g. "en-fr"."""

    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
        super().__init__(
            *args,
            name=f"{lang1}-{lang2}",
            **kwargs,
        )
        self.lang1 = lang1
        self.lang2 = lang2


class WMT_16_PubMed(datasets.GeneratorBasedBuilder):
    """WMT-16-PubMed dataset."""

    DEFAULT_CONFIG_NAME = "en-fr"

    BUILDER_CONFIGS = [
        WMT_16_CONFIG(
            lang1=lang1,
            lang2=lang2,
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version("16.0.0"),
        )
        for lang1, lang2 in _LANGUAGE_PAIRS_TUPLES
    ]
    BUILDER_CONFIG_CLASS = WMT_16_CONFIG

    def _info(self):
        src, target = self.config.name.split("-")
        pair = (src, target)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=pair)}
            ),
            supervised_keys=(src, target),
            homepage="https://www.statmt.org/wmt16/biomedical-translation-task.html",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download the gzipped CSV that contains every language pair.
        data_dir = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        logger.info("⏳ Generating examples from = %s", filepath)
        key_ = 0
        with open(filepath, "rb") as fd:
            gzip_fd = gzip.GzipFile(fileobj=fd)
            df = pd.read_csv(gzip_fd)
            # Equivalent: df = pd.read_csv(filepath, compression="gzip", header=0, sep=",")
        # Keep only the rows belonging to the current language pair.
        for _, row in df.loc[df["lang"] == self.config.name].iterrows():
            # Get language pair
            src, target = str(row["lang"]).split("-")
            yield key_, {
                "translation": {
                    src: str(row["source_text"]).strip(),
                    target: str(row["target_text"]).strip(),
                },
            }
            key_ += 1
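

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the builder): load one language pair
# through `datasets.load_dataset`. The local path "WMT-16-PubMed.py" is an
# assumption -- point it at wherever this script is saved, or at the hub
# repository "qanastek/WMT-16-PubMed" referenced in _URL.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    # Builds the "en-fr" configuration; the gzipped CSV is downloaded from _URL
    # and filtered to rows whose `lang` column equals "en-fr".
    pubmed_en_fr = load_dataset("WMT-16-PubMed.py", "en-fr", split="train")
    print(pubmed_en_fr[0]["translation"])  # e.g. {"en": "...", "fr": "..."}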