# europarl-mono/europarl-mono.py — imported from S3 (commit 5d592f8)
"""Europarl Monolingual Dataset."""
from __future__ import absolute_import, division, print_function

import json
import os
import re
from dataclasses import dataclass
from typing import Optional

import datasets
_CITATION = """\
@inproceedings{koehn2005europarl,
title={Europarl: A parallel corpus for statistical machine translation},
author={Koehn, Philipp},
booktitle={MT summit},
volume={5},
pages={79--86},
year={2005},
organization={Citeseer}
}
"""
_DESCRIPTION = """\
Europarl Monolingual Dataset.
The Europarl parallel corpus is extracted from the proceedings of the
European Parliament (from 2000 to 2011). It includes versions in 21 European
languages: Romanic (French, Italian, Spanish, Portuguese, Romanian),
Germanic (English, Dutch, German, Danish, Swedish), Slavik (Bulgarian,
Czech, Polish, Slovak, Slovene), Finni-Ugric (Finnish, Hungarian, Estonian),
Baltic (Latvian, Lithuanian), and Greek.
Upstream url: https://www.statmt.org/europarl/
"""
_HOMEPAGE = "https://www.statmt.org/europarl/"
_AVAILABLE_LANGUAGES = [
("bg", "Bulgarian"), ("cs", "Czech"), ("da", "Danish"), ("de", "German"),
("el", "Greek"), ("en", "English"), ("es", "Spanish"), ("et", "Estonian"),
("fi", "Finnish"), ("fr", "French"), ("hu", "Hungarian"), ("it", "Italian"),
("lt", "Lithuanian"), ("lv", "Latvian"), ("nl", "Dutch"), ("pl", "Polish"),
("pt", "Portuguese"), ("ro", "Romanian"), ("sk", "Slovak"),
("sl", "Slovene"), ("sv", "Swedish")
]
@dataclass
class EuroparlMonoConfig(datasets.BuilderConfig):
"""BuilderConfig for Europarl Monolingual."""
language: str = None
class EuroparlMonoDataset(datasets.GeneratorBasedBuilder):
"""Europarl Monolingual Dataset."""
_TRAIN_FILE = "train.jsonl"
_VAL_FILE = "val.jsonl"
_TEST_FILE = "test.jsonl"
BUILDER_CONFIGS = [
EuroparlMonoConfig(
name="europarl-%s" % langISO2,
language=langISO2,
version=datasets.Version("7.0.0"),
description="Europarl %s Dataset." % langEnglish,
) for langISO2, langEnglish in _AVAILABLE_LANGUAGES
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"paragraph": datasets.Value("string"),
"date": datasets.Value("string"),
"chapter": datasets.Value("string"),
"speaker": datasets.Value("int16"),
"speaker_name": datasets.Value("string")
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
arch_path = dl_manager.download_and_extract(os.path.join(_HOMEPAGE, "v%d" % self.config.version.major, "europarl.tgz"))
path_dir = os.path.join(arch_path, os.path.join("txt", self.config.language))
paths = [os.path.join(path_dir, file) for file in os.listdir(path_dir)]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepath": paths}
)
]
def _generate_examples(self, filepath):
"""Generate Europarl Monolingual examples."""
id_ = 0
for file in filepath:
with open(file, "r") as f:
current_date = "-".join(os.path.basename(file).strip(".txt").split("-")[-4:-1])
current_paragraph = ""
current_paragraph_id = 0
current_chapter = 0
current_speaker_id = 0
current_speaker_name = ""
for line in f:
if not line.startswith("<"):
current_paragraph += line.strip("\n")
else:
if len(current_paragraph) != 0:
yield id_, {
"paragraph": current_paragraph,
"date": current_date,
"chapter": current_chapter,
"speaker": current_speaker_id,
"speaker_name": current_speaker_name,
}
current_paragraph = ""
id_ += 1
if line.startswith("<P>"):
current_paragraph_id += 1
if line.startswith("<CHAPTER"):
current_chapter = line.lstrip("<CHAPTER ID=").rstrip(">\n").strip("\"")
if line.startswith("<SPEAKER"):
current_speaker_id = "0"
current_speaker_name = ""
grps = re.findall("([^<\s]+)=([^\s\">]+|\"[^\">]+\")", line)
for attr, value in grps:
value = value.strip("\"")
if attr == "ID":
current_speaker_id = int(value)
elif attr == "NAME":
current_speaker_name = value