File size: 5,190 Bytes
5d592f8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
"""Europarl Monolingual Dataset."""

from __future__ import absolute_import, division, print_function

import json
import os
import re
from dataclasses import dataclass
from typing import Optional

import datasets


# BibTeX entry for the original Europarl paper (Koehn, 2005); surfaced
# through `datasets.DatasetInfo.citation`.
_CITATION = """\
@inproceedings{koehn2005europarl,
  title={Europarl: A parallel corpus for statistical machine translation},
  author={Koehn, Philipp},
  booktitle={MT summit},
  volume={5},
  pages={79--86},
  year={2005},
  organization={Citeseer}
}
"""

# Human-readable summary surfaced through `datasets.DatasetInfo.description`.
_DESCRIPTION = """\
Europarl Monolingual Dataset.

The Europarl parallel corpus is extracted from the proceedings of the
European Parliament (from 2000 to 2011). It includes versions in 21 European
languages: Romanic (French, Italian, Spanish, Portuguese, Romanian),
Germanic (English, Dutch, German, Danish, Swedish), Slavik (Bulgarian,
Czech, Polish, Slovak, Slovene), Finni-Ugric (Finnish, Hungarian, Estonian),
Baltic (Latvian, Lithuanian), and Greek.

Upstream url: https://www.statmt.org/europarl/
"""

# Upstream project page; also used as the download root (the archive is
# fetched from <_HOMEPAGE>/v<major>/europarl.tgz in `_split_generators`).
_HOMEPAGE = "https://www.statmt.org/europarl/"

# (ISO 639-1 code, English name) pairs for the 21 supported languages.
# One builder config is generated per entry (see BUILDER_CONFIGS).
_AVAILABLE_LANGUAGES = [
    ("bg", "Bulgarian"), ("cs", "Czech"), ("da", "Danish"), ("de", "German"),
    ("el", "Greek"), ("en", "English"), ("es", "Spanish"), ("et", "Estonian"),
    ("fi", "Finnish"), ("fr", "French"), ("hu", "Hungarian"), ("it", "Italian"),
    ("lt", "Lithuanian"), ("lv", "Latvian"), ("nl", "Dutch"), ("pl", "Polish"),
    ("pt", "Portuguese"), ("ro", "Romanian"), ("sk", "Slovak"),
    ("sl", "Slovene"), ("sv", "Swedish")
]

@dataclass
class EuroparlMonoConfig(datasets.BuilderConfig):
    """BuilderConfig for Europarl Monolingual.

    Attributes:
        language: ISO 639-1 code of the language to load (e.g. "en").
            Set by each generated config in ``BUILDER_CONFIGS``.
    """

    # Default stays None (set per-config); annotated Optional so the
    # default value actually matches the declared type.
    language: Optional[str] = None


class EuroparlMonoDataset(datasets.GeneratorBasedBuilder):
    """Europarl Monolingual Dataset.

    Yields one example per transcript paragraph, annotated with the session
    date and the chapter/speaker metadata parsed from the SGML-style markup
    lines (``<CHAPTER ...>``, ``<SPEAKER ...>``, ``<P>``) in the files.
    """

    _TRAIN_FILE = "train.jsonl"
    _VAL_FILE = "val.jsonl"
    _TEST_FILE = "test.jsonl"

    # One config per supported language, named e.g. "europarl-en".
    BUILDER_CONFIGS = [
        EuroparlMonoConfig(
            name="europarl-%s" % langISO2,
            language=langISO2,
            version=datasets.Version("7.0.0"),
            description="Europarl %s Dataset." % langEnglish,
        ) for langISO2, langEnglish in _AVAILABLE_LANGUAGES
    ]

    def _info(self):
        """Return the dataset metadata (features, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "paragraph": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "chapter": datasets.Value("string"),
                    "speaker": datasets.Value("int16"),
                    "speaker_name": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the corpus; return a single TRAIN split.

        Args:
            dl_manager: ``datasets.DownloadManager`` used to fetch the archive.
        """
        # Build the download URL with plain string formatting: _HOMEPAGE is a
        # URL, and os.path.join would use '\\' separators on Windows.
        url = "%sv%d/europarl.tgz" % (_HOMEPAGE, self.config.version.major)
        arch_path = dl_manager.download_and_extract(url)
        path_dir = os.path.join(arch_path, "txt", self.config.language)
        paths = [os.path.join(path_dir, file) for file in os.listdir(path_dir)]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths}
            )
        ]

    def _generate_examples(self, filepath):
        """Generate Europarl Monolingual examples.

        Args:
            filepath: list of paths to per-session transcript files.

        Yields:
            ``(id, example)`` pairs with keys "paragraph", "date", "chapter",
            "speaker" and "speaker_name".
        """
        # Tag-attribute parser for lines like <SPEAKER ID=3 NAME="...">;
        # raw string (the original non-raw "\s"/"\"" escapes trigger
        # DeprecationWarnings) and compiled once outside the loop.
        attr_re = re.compile(r'([^<\s]+)=([^\s">]+|"[^">]+")')

        id_ = 0
        for file in filepath:
            # Pin the encoding so results do not depend on the platform's
            # locale default. NOTE(review): assumes the v7 release ships
            # UTF-8 text -- confirm against the upstream archive.
            with open(file, "r", encoding="utf-8") as f:
                # Drop the extension with splitext: str.strip(".txt") removes
                # *characters* from both ends, not the suffix, and can eat
                # leading/trailing 't'/'x' letters from the stem.
                # NOTE(review): the [-4:-1] slice presumes a dash-separated
                # file-name layout -- confirm the intended date fields.
                base = os.path.splitext(os.path.basename(file))[0]
                current_date = "-".join(base.split("-")[-4:-1])
                current_paragraph = ""
                current_paragraph_id = 0
                # Defaults typed to match the declared features: "chapter" is
                # a string feature, "speaker" an int16 feature.
                current_chapter = "0"
                current_speaker_id = 0
                current_speaker_name = ""
                for line in f:
                    if not line.startswith("<"):
                        # Plain text: accumulate into the current paragraph.
                        current_paragraph += line.strip("\n")
                        continue
                    # Any markup line terminates the paragraph in progress.
                    if current_paragraph:
                        yield id_, {
                            "paragraph": current_paragraph,
                            "date": current_date,
                            "chapter": current_chapter,
                            "speaker": current_speaker_id,
                            "speaker_name": current_speaker_name,
                        }
                        current_paragraph = ""
                        id_ += 1
                    if line.startswith("<P>"):
                        current_paragraph_id += 1
                    if line.startswith("<CHAPTER"):
                        current_chapter = line.lstrip("<CHAPTER ID=").rstrip(">\n").strip('"')
                    if line.startswith("<SPEAKER"):
                        # Reset to defaults: a tag may lack ID or NAME. The
                        # reset must be the int 0 (not the string "0" as
                        # before) to satisfy the int16 "speaker" feature.
                        current_speaker_id = 0
                        current_speaker_name = ""
                        for attr, value in attr_re.findall(line):
                            value = value.strip('"')
                            if attr == "ID":
                                current_speaker_id = int(value)
                            elif attr == "NAME":
                                current_speaker_name = value