File size: 2,629 Bytes
09653d1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5b0e976
09653d1
 
 
 
1da9876
09653d1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import datasets
import os
import json


# BibTeX citation for the WMT14 shared task; intentionally left empty here and
# surfaced via `datasets.DatasetInfo(citation=...)` in `WMT14._info`.
_CITATION = ""
# Human-readable dataset card text, surfaced via `datasets.DatasetInfo(description=...)`.
# Describes the five English-paired corpora and the prompt format used downstream.
_DESCRIPTION = """
    The 2014 Workshop on Statistical Machine Translation:
    https://aclanthology.org/W14-3302.pdf

    The scenario consists of 5 subsets, each of which is a parallel corpus between English and another language. The
    non-English languages include Czech, German, French, Hindi, and Russian.

    For each language pair, the validation and test set each includes around 3,000 examples, while the training set is
    usually much larger. We therefore randomly downsample the training set to speedup data processing.

    Task prompt structure:

        Translate {source_language} to {target_language}:
        {Hypothesis} = {Reference}

    Example from WMT14 Fr-En:

        Hypothesis: Assemblée générale
        Reference: General Assembly
"""

class WMT14(datasets.GeneratorBasedBuilder):
    """Builder for the WMT14 parallel corpora: five language pairs, each with English.

    Each config name (e.g. ``"fr-en"``) doubles as the directory that holds the
    per-split JSONL files (``train.jsonl``, ``test.jsonl``, ``validation.jsonl``).
    """

    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per language pair; the name encodes "<source>-<target>".
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=pair, version=datasets.Version("1.0.0"), description="")
        for pair in ["cs-en", "de-en", "fr-en", "hi-en", "ru-en"]
    ]

    def _info(self):
        """Return dataset metadata: one string feature per language in the pair."""
        src_lang, tgt_lang = self.config.name.split('-')
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    src_lang: datasets.Value("string"),
                    tgt_lang: datasets.Value("string"),
                }
            ),
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this pair's three JSONL files and map each to its split."""
        train_path = dl_manager.download(os.path.join(self.config.name, "train.jsonl"))
        test_path = dl_manager.download(os.path.join(self.config.name, "test.jsonl"))
        val_path = dl_manager.download(os.path.join(self.config.name, "validation.jsonl"))

        split_to_path = [
            (datasets.Split.TRAIN, train_path),
            (datasets.Split.TEST, test_path),
            (datasets.Split.VALIDATION, val_path),
        ]
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"path": path})
            for split, path in split_to_path
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, path):
        """Yield ``(key, example)`` pairs, parsing one JSON object per line of *path*."""
        with open(path, encoding="utf-8") as handle:
            for idx, line in enumerate(handle):
                yield idx, json.loads(line)