Multilinguality: multilingual
Size Categories: 10M<n<100M
Language Creators: found
Annotations Creators: found
Source Datasets: original
albertvillanova committed
Commit: ec2850a
Parent(s): 3244c02

Delete loading script

Files changed (1):
  1. un_pc.py +0 -97
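With the loading script removed, the dataset is served from data files hosted on the Hub rather than from executed Python, so loading by config name should be unchanged for users. A minimal usage sketch, assuming the "un_pc" dataset ID and the pairwise config names (e.g. "en-fr") generated by the deleted script still apply:

from datasets import load_dataset

# Hedged sketch: "en-fr" is one of the 15 pairwise configs the deleted
# script built from itertools.combinations of the six official UN languages.
ds = load_dataset("un_pc", "en-fr", split="train")
print(ds[0]["translation"])  # e.g. {"en": "...", "fr": "..."}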
un_pc.py DELETED
@@ -1,97 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """The United Nations Parallel Corpus v1.0"""
-
-
- import itertools
- import os
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{ziemski-etal-2016-united,
-     title = "The {U}nited {N}ations Parallel Corpus v1.0",
-     author = "Ziemski, Micha{\\l} and
-       Junczys-Dowmunt, Marcin and
-       Pouliquen, Bruno",
-     booktitle = "Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC}'16)",
-     month = may,
-     year = "2016",
-     address = "Portoro{\v{z}}, Slovenia",
-     publisher = "European Language Resources Association (ELRA)",
-     url = "https://www.aclweb.org/anthology/L16-1561",
-     pages = "3530--3534",
-     abstract = "This paper describes the creation process and statistics of the official United Nations Parallel Corpus, the first parallel corpus composed from United Nations documents published by the original data creator. The parallel corpus presented consists of manually translated UN documents from the last 25 years (1990 to 2014) for the six official UN languages, Arabic, Chinese, English, French, Russian, and Spanish. The corpus is freely available for download under a liberal license. Apart from the pairwise aligned documents, a fully aligned subcorpus for the six official UN languages is distributed. We provide baseline BLEU scores of our Moses-based SMT systems trained with the full data of language pairs involving English and for all possible translation directions of the six-way subcorpus.",
- }
- """
-
-
- _DESCRIPTION = """\
- This parallel corpus consists of manually translated UN documents from the last 25 years (1990 to 2014) \
- for the six official UN languages, Arabic, Chinese, English, French, Russian, and Spanish.
- """
-
-
- _HOMEPAGE = "https://opus.nlpl.eu/UNPC/corpus/version/UNPC"
-
- _LANGUAGES = ["ar", "en", "es", "fr", "ru", "zh"]
- _LANGUAGE_PAIRS = list(itertools.combinations(_LANGUAGES, 2))
-
- _BASE_URL = "https://object.pouta.csc.fi/OPUS-UNPC/v1.0/moses"
- _URLS = {f"{l1}-{l2}": f"{_BASE_URL}/{l1}-{l2}.txt.zip" for l1, l2 in _LANGUAGE_PAIRS}
-
-
- class UnPc(datasets.GeneratorBasedBuilder):
-     """The United Nations Parallel Corpus v1.0"""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name=f"{l1}-{l2}", version=datasets.Version("1.0.0"), description=f"UNPC {l1}-{l2}")
-         for l1, l2 in _LANGUAGE_PAIRS
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         lang_pair = self.config.name.split("-")
-         data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "source_file": os.path.join(data_dir, f"UNPC.{self.config.name}.{lang_pair[0]}"),
-                     "target_file": os.path.join(data_dir, f"UNPC.{self.config.name}.{lang_pair[1]}"),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, source_file, target_file):
-         source, target = tuple(self.config.name.split("-"))
-         with open(source_file, encoding="utf-8") as src_f, open(target_file, encoding="utf-8") as tgt_f:
-             for idx, (l1, l2) in enumerate(zip(src_f, tgt_f)):
-                 result = {"translation": {source: l1.strip(), target: l2.strip()}}
-                 yield idx, result
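For reference, the download-and-pair logic the deleted script implemented can be sketched without the datasets builder machinery. This is a minimal illustration, assuming the OPUS URL layout and the UNPC.{pair}.{lang} file naming shown above are unchanged; it reads a whole archive into memory, which is impractical for the larger pairs:

import io
import urllib.request
import zipfile

BASE_URL = "https://object.pouta.csc.fi/OPUS-UNPC/v1.0/moses"

def iter_unpc_pairs(l1, l2):
    """Yield aligned (l1, l2) sentence pairs, mirroring _generate_examples."""
    pair = f"{l1}-{l2}"
    # Download the Moses-format zip for the requested language pair.
    with urllib.request.urlopen(f"{BASE_URL}/{pair}.txt.zip") as resp:
        archive = zipfile.ZipFile(io.BytesIO(resp.read()))
    # The archive holds one plain-text file per language, aligned line by line.
    with archive.open(f"UNPC.{pair}.{l1}") as src, archive.open(f"UNPC.{pair}.{l2}") as tgt:
        for src_line, tgt_line in zip(src, tgt):
            yield src_line.decode("utf-8").strip(), tgt_line.decode("utf-8").strip()

# Usage: print the first aligned en-fr sentence pair.
for en, fr in iter_unpc_pairs("en", "fr"):
    print(en, "|||", fr)
    break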