import os

import datasets


_DESCRIPTION = """\
Giga-word corpus for French-English from WMT2010 collected by Chris Callison-Burch
2 languages, total number of files: 452
total number of tokens: 1.43G
total number of sentence fragments: 47.55M
"""
_HOMEPAGE_URL = "http://opus.nlpl.eu/giga-fren.php"
_CITATION = """\
@InProceedings{TIEDEMANN12.463,
  author = {J{\"o}rg Tiedemann},
  title = {Parallel Data, Tools and Interfaces in OPUS},
  booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
  year = {2012},
  month = {may},
  date = {23-25},
  address = {Istanbul, Turkey},
  editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
  publisher = {European Language Resources Association (ELRA)},
  isbn = {978-2-9517408-7-7},
  language = {english}
  }
"""

_VERSION = "2.0.0"
_BASE_NAME = "giga-fren.{}.{}"
_URL = "https://object.pouta.csc.fi/OPUS-giga-fren/v2/moses/en-fr.txt.zip"


class GigaFrenConfig(datasets.BuilderConfig):
    def __init__(self, *args, **kwargs):
        super().__init__(
            *args,
            name="en-fr",
            **kwargs,
        )

    @property
    def lang1(self):
        return "en"

    @property
    def lang2(self):
        return "fr"


class GigaFren(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [GigaFrenConfig(description="Translating en to fr", version=datasets.Version(_VERSION))]
    BUILDER_CONFIG_CLASS = GigaFrenConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Only a single train split is generated from the downloaded archive.
        path = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path},
            )
        ]

    def _generate_examples(self, datapath):
        l1, l2 = self.config.lang1, self.config.lang2
        folder = l1 + "-" + l2
        l1_file = _BASE_NAME.format(folder, l1)
        l2_file = _BASE_NAME.format(folder, l2)
        l1_path = os.path.join(datapath, l1_file)
        l2_path = os.path.join(datapath, l2_file)
        # The extracted archive holds one plain-text file per language with
        # line-aligned sentences, so zipping the two files yields the pairs.
        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
                x = x.strip()
                y = y.strip()
                result = (
                    sentence_counter,
                    {
                        "id": str(sentence_counter),
                        "translation": {l1: x, l2: y},
                    },
                )
                yield result
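

# A minimal usage sketch, not part of the original loader: it assumes this file is
# executed directly with the `datasets` library installed, and exercises the builder
# defined above without going through `load_dataset` or the Hub.
if __name__ == "__main__":
    builder = GigaFren()                    # the single "en-fr" config is selected
    builder.download_and_prepare()          # download and extract the OPUS archive
    ds = builder.as_dataset(split="train")  # materialize the train split
    # Each example looks like {"id": "0", "translation": {"en": ..., "fr": ...}}.
    print(ds[0])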