# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""WMT'16 Biomedical Translation Task - PubMed parallel datasets"""
import datasets
import pandas as pd
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@inproceedings{bojar-etal-2016-findings,
title = Findings of the 2016 Conference on Machine Translation,
author = {
Bojar, Ondrej and
Chatterjee, Rajen and
Federmann, Christian and
Graham, Yvette and
Haddow, Barry and
Huck, Matthias and
Jimeno Yepes, Antonio and
Koehn, Philipp and
Logacheva, Varvara and
Monz, Christof and
Negri, Matteo and
Neveol, Aurelie and
Neves, Mariana and
Popel, Martin and
Post, Matt and
Rubino, Raphael and
Scarton, Carolina and
Specia, Lucia and
Turchi, Marco and
Verspoor, Karin and
Zampieri, Marcos
},
booktitle = Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers,
month = aug,
year = 2016,
address = Berlin, Germany,
publisher = Association for Computational Linguistics,
url = https://aclanthology.org/W16-2301,
doi = 10.18653/v1/W16-2301,
pages = 131--198,
}
"""
_LANGUAGE_PAIRS = ['en-pt', 'en-es', 'en-fr']
_LICENSE = """
This work is licensed under a <a rel="license" href="https://creativecommons.org/licenses/by/4.0/">Attribution 4.0 International (CC BY 4.0) License</a>.
"""
_DESCRIPTION = """
WMT'16 Biomedical Translation Task - PubMed parallel datasets
http://www.statmt.org/wmt16/biomedical-translation-task.html
"""
_URL = "https://huggingface.co/datasets/qanastek/WMT-16-PubMed/resolve/main/WMT16.csv.gz"

class WMT_16_PubMed(datasets.GeneratorBasedBuilder):
    """WMT-16-PubMed dataset."""

    # BUILDER_CONFIGS = [
    #     datasets.BuilderConfig(name=name, version=datasets.Version("16.0.0"), description=_DESCRIPTION, citation=_CITATION) for name in _LANGUAGE_PAIRS
    # ]

    DEFAULT_CONFIG_NAME = "en-fr"

    def _info(self):
        src, target = self.config.name.split("-")
        pair = (src, target)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=pair)}
            ),
            supervised_keys=(src, target),
            homepage="https://www.statmt.org/wmt16/biomedical-translation-task.html",
            citation=_CITATION,
            # license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        # Download the CSV
        data_dir = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                }
            ),
        ]
    def _generate_examples(self, filepath, split):
        logger.info("⏳ Generating examples from = %s", filepath)
        key_ = 0
        df = pd.read_csv(filepath, compression='gzip', header=0, sep=',')
        # Keep only the rows of the requested language pair (the config name, e.g. "en-fr").
        for _, row in df.loc[df['lang'] == self.config.name].iterrows():
            # Get the language pair
            src, target = str(row['lang']).split("-")
            yield key_, {
                "translation": {
                    src: str(row['source_text']).strip(),
                    target: str(row['target_text']).strip(),
                },
            }
            key_ += 1
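

# Usage sketch, not part of the loading script itself. It assumes the script is
# published on the Hugging Face Hub as "qanastek/WMT-16-PubMed" (the repository
# referenced by _URL above); pointing load_dataset at the local path of this
# script should behave the same way.
if __name__ == "__main__":
    from datasets import load_dataset

    # Pick one of the pairs in _LANGUAGE_PAIRS: "en-pt", "en-es" or "en-fr".
    pubmed_en_fr = load_dataset("qanastek/WMT-16-PubMed", "en-fr", split="train")

    # Each example is a dict like {"translation": {"en": "...", "fr": "..."}}.
    print(pubmed_en_fr[0]["translation"])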