Datasets:

Multilinguality:
multilingual
Size Categories:
1M<n<10M
Language Creators:
found
Annotations Creators:
found
Source Datasets:
original
ArXiv:
License:
albertvillanova HF staff committed on
Commit
d1a454f
1 Parent(s): 3cde639

Delete loading script

Browse files
Files changed (1) hide show
  1. capes.py +0 -98
capes.py DELETED
@@ -1,98 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Capes: Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES"""
16
-
17
-
18
- import datasets
19
-
20
-
21
- _CITATION = """\
22
- @inproceedings{soares2018parallel,
23
- title={A Parallel Corpus of Theses and Dissertations Abstracts},
24
- author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose},
25
- booktitle={International Conference on Computational Processing of the Portuguese Language},
26
- pages={345--352},
27
- year={2018},
28
- organization={Springer}
29
- }
30
- """
31
-
32
-
33
- _DESCRIPTION = """\
34
- A parallel corpus of theses and dissertations abstracts in English and Portuguese were collected from the \
35
- CAPES website (Coordenação de Aperfeiçoamento de Pessoal de Nível Superior) - Brazil. \
36
- The corpus is sentence aligned for all language pairs. Approximately 240,000 documents were \
37
- collected and aligned using the Hunalign algorithm.
38
- """
39
-
40
-
41
- _HOMEPAGE = "https://sites.google.com/view/felipe-soares/datasets#h.p_kxOR6EhHm2a6"
42
-
43
- _URL = "https://ndownloader.figstatic.com/files/14015837"
44
-
45
-
46
class Capes(datasets.GeneratorBasedBuilder):
    """Capes: Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="en-pt",
            version=datasets.Version("1.0.0"),
            description="Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES",
        )
    ]

    def _info(self):
        """Return dataset metadata; the language pair is parsed from the config name."""
        # Config name is of the form "<src>-<tgt>", e.g. "en-pt".
        languages = self.config.name.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=languages)}
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and define the single TRAIN split.

        Two independent iterators over the same downloaded archive are passed
        down so the source and target member files can each be located and
        then read in lockstep, without extracting the archive to disk.
        """
        archive = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "source_file": "en_pt.en",
                    "target_file": "en_pt.pt",
                    "src_files": dl_manager.iter_archive(archive),
                    "tgt_files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _generate_examples(self, source_file, target_file, src_files, tgt_files):
        """Yield aligned sentence pairs as `{"translation": {src: ..., tgt: ...}}` examples."""
        source, target = self.config.name.split("-")
        for src_name, src_handle in src_files:
            if src_name != source_file:
                continue  # skip archive members until the source file is found
            for tgt_name, tgt_handle in tgt_files:
                if tgt_name != target_file:
                    continue  # skip archive members until the target file is found
                # Files are sentence-aligned line-by-line; zip walks them in lockstep.
                for idx, (src_line, tgt_line) in enumerate(zip(src_handle, tgt_handle)):
                    src_text = src_line.decode("utf-8").strip()
                    tgt_text = tgt_line.decode("utf-8").strip()
                    # Drop pairs where either side is empty after stripping.
                    if src_text and tgt_text:
                        yield idx, {"translation": {source: src_text, target: tgt_text}}
                break
            break