Datasets:
Tasks:
- translation
Multilinguality:
- translation
Size Categories:
- 100K<n<1M
Annotations Creators:
- expert-generated
License:
- unknown
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WMT19 Metrics Shared Task: Segment-Level Data""" | |
import datasets | |
import csv | |
_CITATION = """\ | |
@inproceedings{ma-etal-2019-results, | |
title = {Results of the WMT19 Metrics Shared Task: Segment-Level and Strong MT Systems Pose Big Challenges}, | |
author = {Ma, Qingsong and Wei, Johnny and Bojar, Ondřej and Graham, Yvette}, | |
booktitle = {Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1)}, | |
month = {aug}, | |
year = {2019}, | |
address = {Florence, Italy}, | |
publisher = {Association for Computational Linguistics}, | |
url = {https://aclanthology.org/W19-5302}, | |
doi = {10.18653/v1/W19-5302}, | |
pages = {62--90} | |
} | |
""" | |
_DESCRIPTION = """\ | |
This shared task will examine automatic evaluation metrics for machine translation. We will provide | |
you with all of the translations produced in the translation task along with the human reference translations. | |
You will return your automatic metric scores for translations at the system-level and/or at the sentence-level. | |
We will calculate the system-level and sentence-level correlations of your scores with WMT19 human judgements | |
once the manual evaluation has been completed. | |
""" | |
_HOMEPAGE = "https://www.statmt.org/wmt19/metrics-task.html" | |
_LICENSE = "Unknown" | |
_LANGUAGE_PAIRS = [('de', 'cs'), ('de', 'en'), ('de', 'fr'), ('en', 'cs'), ('en', 'de'), ('en', 'fi'), ('en', 'gu'), ('en', 'kk'), ('en', 'lt'), ('en', 'ru'), | |
('en', 'zh'), ('fi', 'en'), ('fr', 'de'), ('gu', 'en'), ('kk', 'en'), ('lt', 'en'), ('ru', 'en'), ('zh', 'en')] | |
_URL_BASE = "https://huggingface.co/datasets/muibk/wmt19_metrics_task/resolve/main/" | |
_URLs = {f"{src_lg}-{trg_lg}": f"{_URL_BASE}{src_lg}-{trg_lg}/train.csv" for src_lg, trg_lg in _LANGUAGE_PAIRS} | |
class WmtMetricsTaskConfig(datasets.BuilderConfig): | |
"""BuilderConfig for WMT Metrics Shared Task.""" | |
def __init__(self, src_lg, tgt_lg, **kwargs): | |
super(WmtMetricsTaskConfig, self).__init__(**kwargs) | |
self.src_lg = src_lg | |
self.tgt_lg = tgt_lg | |
class Wmt19MetricsTask(datasets.GeneratorBasedBuilder): | |
"""WMT Metrics Shared Task.""" | |
BUILDER_CONFIGS = [ | |
WmtMetricsTaskConfig( | |
name=f"{src_lg}-{tgt_lg}", | |
version=datasets.Version("1.1.0"), | |
description=f"WMT 2019 Metrics Task: {src_lg} - {tgt_lg}", | |
src_lg=src_lg, | |
tgt_lg=tgt_lg, | |
) | |
for (src_lg, tgt_lg) in _LANGUAGE_PAIRS | |
] | |
BUILDER_CONFIG_CLASS = WmtMetricsTaskConfig | |
def _info(self): | |
# define feature types | |
features = datasets.Features( | |
{ | |
#'source' : datasets.Value("string"), | |
#'system_output':datasets.Value("string"), | |
'translation': datasets.Translation(languages=(self.config.src_lg, self.config.tgt_lg)), | |
'mt_system':datasets.Value("string"), | |
'mqm':datasets.Value("float32"), | |
'wmt-raw':datasets.Value("float32"), | |
'wmt-z':datasets.Value("float32"), | |
'pair':datasets.Value("string"), | |
'dataset':datasets.Value("string"), | |
'sent_id':datasets.Value("int32"), | |
'doc_name':datasets.Value("string"), | |
'doc_ref':datasets.Value("string"), | |
'ref':datasets.Value("string") | |
} | |
) | |
return datasets.DatasetInfo( | |
description=_DESCRIPTION, | |
features=features, | |
supervised_keys=None, | |
homepage=_HOMEPAGE, | |
license=_LICENSE, | |
citation=_CITATION, | |
) | |
def _split_generators(self, dl_manager): | |
"""Returns SplitGenerators.""" | |
pair = f"{self.config.src_lg}-{self.config.tgt_lg}" # string identifier for language pair | |
url = _URLs[pair] # url for download of pair-specific train.csv | |
data_file = dl_manager.download_and_extract(url) # extract downloaded data and store path in data_file | |
return [ | |
datasets.SplitGenerator( | |
name=datasets.Split.TRAIN, | |
gen_kwargs={ | |
"filepath": data_file, | |
"source_lg": self.config.src_lg, | |
"target_lg": self.config.tgt_lg, | |
} | |
) | |
] | |
def _generate_examples(self, filepath, source_lg, target_lg): | |
with open(filepath, encoding="utf-8") as f: | |
reader = csv.DictReader(f, delimiter=";") # read each line into dict | |
for id_, row in enumerate(reader): | |
row["translation"] = {source_lg : row["source"], target_lg: row["system_output"]} # create translation json | |
for key in ["source", "system_output"]: # remove obsolete columns | |
row.pop(key) | |
row = {k: None if not v else v for k, v in row.items()} # replace empty values | |
yield id_, row | |
# to test the script, go to the root folder of the repo (wmt19_metrics_task) and run:
# datasets-cli test ./wmt19_metrics_task.py --save_infos --all_configs