# NOTE(review): removed non-Python residue from a web file-viewer that preceded
# this line (blob metadata, git commit hashes, and a line-number gutter).
# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""WMT'16 Biomedical Translation Task - PubMed parallel datasets"""
import gzip
import datasets
import pandas as pd
logger = datasets.logging.get_logger(__name__)
# BibTeX citation for the WMT'16 findings paper (Bojar et al., 2016).
# NOTE(review): field values are missing the usual BibTeX braces/quotes —
# fine for display, but confirm before feeding this string to a BibTeX parser.
_CITATION = """
@inproceedings{bojar-etal-2016-findings,
title = Findings of the 2016 Conference on Machine Translation,
author = {
Bojar, Ondrej and
Chatterjee, Rajen and
Federmann, Christian and
Graham, Yvette and
Haddow, Barry and
Huck, Matthias and
Jimeno Yepes, Antonio and
Koehn, Philipp and
Logacheva, Varvara and
Monz, Christof and
Negri, Matteo and
Neveol, Aurelie and
Neves, Mariana and
Popel, Martin and
Post, Matt and
Rubino, Raphael and
Scarton, Carolina and
Specia, Lucia and
Turchi, Marco and
Verspoor, Karin and
Zampieri, Marcos
},
booktitle = Proceedings of the First Conference on Machine Translation: Volume 2, Shared Task Papers,
month = aug,
year = 2016,
address = Berlin, Germany,
publisher = Association for Computational Linguistics,
url = https://aclanthology.org/W16-2301,
doi = 10.18653/v1/W16-2301,
pages = 131--198,
}
"""
# Supported language pairs as "src-tgt" builder-config names.
_LANGUAGE_PAIRS = ['en-pt', 'en-es', 'en-fr']
# Same pairs as (src, tgt) tuples, used to build BUILDER_CONFIGS below.
_LANGUAGE_PAIRS_TUPLES = [('en','pt'), ('en','es'), ('en','fr')]
# License blurb (HTML). Currently unused — see the commented-out `license=`
# argument in WMT_16_PubMed._info.
_LICENSE = """
This work is licensed under a <a rel="license" href="https://creativecommons.org/licenses/by/4.0/">Attribution 4.0 International (CC BY 4.0) License</a>.
"""
# Human-readable dataset description shown on the Hub.
_DESCRIPTION = """
WMT'16 Biomedical Translation Task - PubMed parallel datasets
http://www.statmt.org/wmt16/biomedical-translation-task.html
"""
# Single gzipped CSV containing all language pairs; rows are filtered by the
# `lang` column at generation time.
_URL = "https://huggingface.co/datasets/qanastek/WMT-16-PubMed/resolve/main/WMT16.csv.gz"
class WMT_16_CONFIG(datasets.BuilderConfig):
    """BuilderConfig for one language pair of the WMT-16 PubMed dataset.

    The config name is derived from the pair: ``lang1="en", lang2="fr"``
    yields the config ``"en-fr"``.
    """

    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
        """
        Args:
            lang1: source language code, e.g. ``"en"``.
            lang2: target language code, e.g. ``"fr"``.
            *args, **kwargs: forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(
            *args,
            name=f"{lang1}-{lang2}",
            **kwargs,
        )
        # Fix: the parent constructor already stored name=f"{lang1}-{lang2}";
        # the original redundantly reassigned self.name here.
        self.lang1 = lang1
        self.lang2 = lang2
class WMT_16_PubMed(datasets.GeneratorBasedBuilder):
    """WMT'16 Biomedical Translation Task — PubMed parallel corpus builder.

    Exposes one TRAIN split per language pair (en-pt, en-es, en-fr), all read
    from a single gzipped CSV hosted on the Hugging Face Hub (``_URL``). Rows
    are selected by matching the CSV's ``lang`` column against the config name.
    """

    DEFAULT_CONFIG_NAME = "en-fr"
    BUILDER_CONFIG_CLASS = WMT_16_CONFIG
    BUILDER_CONFIGS = [
        WMT_16_CONFIG(
            lang1=lang1,
            lang2=lang2,
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version("16.0.0"),
        )
        for lang1, lang2 in _LANGUAGE_PAIRS_TUPLES
    ]

    def _info(self):
        """Return dataset metadata for the currently selected language pair."""
        src, target = self.config.name.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=(src, target))}
            ),
            supervised_keys=(src, target),
            homepage="https://www.statmt.org/wmt16/biomedical-translation-task.html",
            citation=_CITATION,
            # license=_LICENSE,  # NOTE(review): left disabled to preserve
            # current behavior — confirm the expected format before enabling.
        )

    def _split_generators(self, dl_manager):
        """Download the gzipped CSV and expose it as a single TRAIN split."""
        data_dir = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs for the configured language pair.

        Args:
            filepath: local path to the downloaded gzipped CSV.
            split: split name (always ``"train"``); unused but required by
                ``gen_kwargs``.

        Yields:
            Tuples of an integer key and a dict with a single ``"translation"``
            entry mapping each language code to its stripped sentence.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        # Fix: pandas decompresses gzip transparently; the original manually
        # wrapped the handle in gzip.GzipFile for no benefit.
        df = pd.read_csv(filepath, compression="gzip")
        # Fix: the pair is identical for every selected row (rows are filtered
        # on lang == config.name), so split once instead of per row.
        src, target = self.config.name.split("-")
        selected = df.loc[df["lang"] == self.config.name]
        for key, (_, row) in enumerate(selected.iterrows()):
            yield key, {
                "translation": {
                    src: str(row["source_text"]).strip(),
                    target: str(row["target_text"]).strip(),
                },
            }