Datasets: opus_memat

Multilinguality: translation
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: found
Source Datasets: original
Tags:
License:
albertvillanova committed
Commit b3f6c16
1 Parent(s): 5ac1042

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (5213cfc2742836ef49f0bb751988fc2595a01682)
- Delete loading script (d14e22021515a38945ada3c14346d8e23ed3de35)
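
In substance, the conversion this commit captures amounts to materializing the script-built dataset as a Parquet shard. A minimal sketch of that step, assuming the `datasets` library and the pre-commit loading script (the Hub's actual conversion tooling is not reproduced here):

    from datasets import load_dataset

    # Build the dataset with the (now deleted) loading script, then
    # serialize the single train split to the Parquet shard added below.
    ds = load_dataset("opus_memat", "xh-en", split="train")
    ds.to_parquet("xh-en/train-00000-of-00001.parquet")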

Files changed (3):
  1. README.md (+9 -5)
  2. opus_memat.py (+0 -88)
  3. xh-en/train-00000-of-00001.parquet (+3 -0)
README.md CHANGED
@@ -17,9 +17,9 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: OpusMemat
 dataset_info:
+  config_name: xh-en
   features:
   - name: translation
     dtype:
@@ -27,13 +27,17 @@ dataset_info:
         languages:
         - xh
         - en
-  config_name: xh-en
   splits:
   - name: train
-    num_bytes: 25400570
+    num_bytes: 25400442
     num_examples: 154764
-  download_size: 8382865
-  dataset_size: 25400570
+  download_size: 14115561
+  dataset_size: 25400442
+configs:
+- config_name: xh-en
+  data_files:
+  - split: train
+    path: xh-en/train-*
 ---
 
 # Dataset Card for [opus_memat]
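
The practical effect of the new `configs` block is that `load_dataset` resolves the `xh-en` config straight to the Parquet shard via the `data_files` glob, with no loading script involved. A minimal sketch of loading after this change:

    from datasets import load_dataset

    # The xh-en config now maps to xh-en/train-* (the Parquet shard below).
    ds = load_dataset("opus_memat", "xh-en")
    print(ds["train"].num_rows)           # 154764, per dataset_info
    print(ds["train"][0]["translation"])  # {'xh': '...', 'en': '...'}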
opus_memat.py DELETED
@@ -1,88 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Xhosa-English parallel corpora, """
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-J. Tiedemann, 2012, Parallel Data, Tools and Interfaces in OPUS. In Proceedings of the 8th\
-International Conference on Language Resources and Evaluation (LREC 2012)
-
-"""
-
-_DESCRIPTION = """\
-Xhosa-English parallel corpora, funded by EPSRC, the Medical Machine Translation project worked on machine translation\
-between ixiXhosa and English, with a focus on the medical domain."""
-
-
-_URLs = {"train": "https://object.pouta.csc.fi/OPUS-memat/v1/moses/en-xh.txt.zip"}
-
-
-class OpusMemat(datasets.GeneratorBasedBuilder):
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="xh-en", version=VERSION, description="Xhosa-English parallel corpora")
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
-            ),
-            supervised_keys=None,
-            homepage="http://opus.nlpl.eu/memat.php",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "source_file": os.path.join(data_dir["train"], "memat.en-xh.xh"),
-                    "target_file": os.path.join(data_dir["train"], "memat.en-xh.en"),
-                    "split": "train",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, source_file, target_file, split):
-        """This function returns the examples in the raw (text) form."""
-        with open(source_file, encoding="utf-8") as f:
-            source_sentences = f.read().split("\n")
-        with open(target_file, encoding="utf-8") as f:
-            target_sentences = f.read().split("\n")
-
-        assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
-            len(source_sentences),
-            len(target_sentences),
-            source_file,
-            target_file,
-        )
-
-        source, target = tuple(self.config.name.split("-"))
-        for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
-            result = {"translation": {source: l1, target: l2}}
-            yield idx, result
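
For readers who want the raw corpus without the `datasets` machinery, the deleted script boils down to zipping two line-aligned text files. A standalone sketch of the same logic (modulo trailing-newline handling), assuming the OPUS zip from _URLs has been downloaded and extracted locally:

    import itertools

    def read_pairs(source_file="memat.en-xh.xh", target_file="memat.en-xh.en"):
        # Pair the two moses files line by line, as _generate_examples did.
        with open(source_file, encoding="utf-8") as fs, open(target_file, encoding="utf-8") as ft:
            for xh, en in zip(fs, ft):
                yield {"translation": {"xh": xh.rstrip("\n"), "en": en.rstrip("\n")}}

    for example in itertools.islice(read_pairs(), 3):
        print(example)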
xh-en/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24225fece9f29cc89525c6fdedc9b9f68a6558c710bd8c3ab884078a9ab784d3
+size 14115561
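
The checked-in file is only a Git LFS pointer; the actual 14,115,561-byte Parquet shard lives in LFS storage. Once fetched, it can be read with any Parquet reader, e.g. pandas (a minimal sketch):

    import pandas as pd

    # Requires the real shard, not the LFS pointer (clone with git-lfs
    # enabled, or download the file from the Hub).
    df = pd.read_parquet("xh-en/train-00000-of-00001.parquet")
    print(len(df))  # 154764 rows, matching num_examples in the README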