albertvillanova HF staff committed on
Commit 2227661
1 Parent(s): ee70ec1

Convert dataset to Parquet (#9)


- Convert dataset to Parquet (8a07a8a8f537eac954b623132cf4b860207d2d27)
- Add 'bible' config data files (a846a4cc8760f943bd3ce816b6866a0af92575b0)
- Add 'quran' config data files (6c0e9e6088f429cd016fa726615b2d36c85be1de)
- Delete loading script (6ca1255077beb5009bb9fa5a3628cc7930926604)
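With the loading script deleted and the data committed as Parquet, each config can be loaded straight from the Hub files. A minimal sketch, assuming the dataset's repository id is "um005" (the config names all, bible and quran come from the README metadata below):

from datasets import load_dataset

# Repository id "um005" is an assumption; adjust to the actual Hub id.
# After this commit the splits are read from the Parquet shards declared in the
# README's configs: section, so no dataset script has to be executed.
bible = load_dataset("um005", "bible")

print(bible)              # DatasetDict with train / validation / test splits
print(bible["train"][0])  # e.g. {"id": "bible-0", "translation": {"ur": "...", "en": "..."}}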

README.md CHANGED
@@ -20,7 +20,7 @@ task_ids: []
  paperswithcode_id: umc005-english-urdu
  pretty_name: UMC005 English-Urdu
  dataset_info:
- - config_name: bible
+ - config_name: all
    features:
    - name: id
      dtype: string
@@ -32,17 +32,17 @@ dataset_info:
          - en
    splits:
    - name: train
-     num_bytes: 2350730
-     num_examples: 7400
+     num_bytes: 5360817
+     num_examples: 13400
    - name: validation
-     num_bytes: 113476
-     num_examples: 300
+     num_bytes: 160035
+     num_examples: 514
    - name: test
-     num_bytes: 104678
-     num_examples: 257
-   download_size: 3683565
-   dataset_size: 2568884
- - config_name: quran
+     num_bytes: 151809
+     num_examples: 457
+   download_size: 2805950
+   dataset_size: 5672661
+ - config_name: bible
    features:
    - name: id
      dtype: string
@@ -54,17 +54,17 @@ dataset_info:
          - en
    splits:
    - name: train
-     num_bytes: 2929711
-     num_examples: 6000
+     num_bytes: 2395118
+     num_examples: 7400
    - name: validation
-     num_bytes: 43499
-     num_examples: 214
+     num_bytes: 115264
+     num_examples: 300
    - name: test
-     num_bytes: 44413
-     num_examples: 200
-   download_size: 3683565
-   dataset_size: 3017623
- - config_name: all
+     num_bytes: 106208
+     num_examples: 257
+   download_size: 1279964
+   dataset_size: 2616590
+ - config_name: quran
    features:
    - name: id
      dtype: string
@@ -76,16 +76,42 @@ dataset_info:
          - en
    splits:
    - name: train
-     num_bytes: 5280441
-     num_examples: 13400
+     num_bytes: 2965699
+     num_examples: 6000
    - name: validation
-     num_bytes: 156963
-     num_examples: 514
+     num_bytes: 44771
+     num_examples: 214
    - name: test
-     num_bytes: 149079
-     num_examples: 457
-   download_size: 3683565
-   dataset_size: 5586483
+     num_bytes: 45601
+     num_examples: 200
+   download_size: 1530371
+   dataset_size: 3056071
+ configs:
+ - config_name: all
+   data_files:
+   - split: train
+     path: all/train-*
+   - split: validation
+     path: all/validation-*
+   - split: test
+     path: all/test-*
+   default: true
+ - config_name: bible
+   data_files:
+   - split: train
+     path: bible/train-*
+   - split: validation
+     path: bible/validation-*
+   - split: test
+     path: bible/test-*
+ - config_name: quran
+   data_files:
+   - split: train
+     path: quran/train-*
+   - split: validation
+     path: quran/validation-*
+   - split: test
+     path: quran/test-*
  ---

  # Dataset Card for UMC005 English-Urdu
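The new configs: block is what tells the datasets library which Parquet shards belong to each configuration, with the "all" config marked as the default. A minimal sketch of the equivalent manual mapping through the generic parquet builder; the hf://datasets/um005/... paths assume the repository id "um005" and a datasets/huggingface_hub version with hf:// filesystem support:

from datasets import load_dataset

# Mirror the README's data_files mapping for the "quran" config by hand.
# Repository id "um005" is an assumption; the glob patterns come from the YAML above.
data_files = {
    "train": "hf://datasets/um005/quran/train-*",
    "validation": "hf://datasets/um005/quran/validation-*",
    "test": "hf://datasets/um005/quran/test-*",
}
quran = load_dataset("parquet", data_files=data_files)

# The example counts should line up with the dataset_info metadata above
# (6000 / 214 / 200 for the quran config).
print({split: ds.num_rows for split, ds in quran.items()})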
all/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aed70abd0ae4f4454cf749ad861a1d729e0d0700375fe625eedc2deccf26f04a
+ size 75680
all/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f758104aac7324fb7692915e3f03042f264c53966a41e634d501ef4e63e1d284
+ size 2651244
all/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d579e449bf487db726e329a1514f476899368ac0a2a7c39d7c37501d42b7ef6d
+ size 79026
bible/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bab87966e46e38c934f89f57a9016f390d18c1f9f3210080123a2efe97747eae
+ size 51924
bible/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17398c01e8f828b3a1909255fb626cc18bfc88f25acf4fcdc07ef96cd33f6d44
+ size 1171873
bible/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8f44672ec813774bae5590867744767213504037effe3e9cf418f40f924b8e4
+ size 56167
quran/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a54ca18643500a63e2544395d7502ecf702ba33b929be7a85392eb64d6906300
+ size 27563
quran/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b910a4e9f81b06c7504329baeea951189a820240e1582d66db4ed9455e1f1e7
+ size 1475594
quran/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c835ac28a98af62c07d4bc197e392755bc0cd732f10d36614d39410f8419f45
+ size 27214
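The *.parquet entries above are Git LFS pointer files rather than the data itself: each records only the pointer spec version, the sha256 oid of the real file and its size in bytes. A small sketch of fetching one shard and reading it, again assuming the repository id "um005":

import hashlib

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Download the real Parquet shard that the LFS pointer above stands in for.
# repo_id "um005" is an assumption; the filename matches the bible/test pointer block.
path = hf_hub_download(
    repo_id="um005",
    repo_type="dataset",
    filename="bible/test-00000-of-00001.parquet",
)

# The file's sha256 digest should equal the oid recorded in the pointer.
with open(path, "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest())

# Read it like any other Parquet file.
table = pq.read_table(path)
print(table.num_rows, table.column_names)  # expected: 257 rows, ['id', 'translation']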
um005.py DELETED
@@ -1,145 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- import os
-
- import datasets
-
-
- _DESCRIPTION = """\
- UMC005 English-Urdu is a parallel corpus of texts in English and Urdu language with sentence alignments. The corpus can be used for experiments with statistical machine translation.
-
- The texts come from four different sources:
- - Quran
- - Bible
- - Penn Treebank (Wall Street Journal)
- - Emille corpus
-
- The authors provide the religious texts of Quran and Bible for direct download. Because of licensing reasons, Penn and Emille texts cannot be redistributed freely. However, if you already hold a license for the original corpora, we are able to provide scripts that will recreate our data on your disk. Our modifications include but are not limited to the following:
-
- - Correction of Urdu translations and manual sentence alignment of the Emille texts.
- - Manually corrected sentence alignment of the other corpora.
- - Our data split (training-development-test) so that our published experiments can be reproduced.
- - Tokenization (optional, but needed to reproduce our experiments).
- - Normalization (optional) of e.g. European vs. Urdu numerals, European vs. Urdu punctuation, removal of Urdu diacritics.
- """
- _HOMEPAGE_URL = "https://ufal.mff.cuni.cz/umc/005-en-ur/"
- _URL = "https://ufal.mff.cuni.cz/umc/005-en-ur/download.php?f=umc005-corpus.zip"
- _CITATION = """\
- @unpublished{JaZeWordOrderIssues2011,
- author = {Bushra Jawaid and Daniel Zeman},
- title = {Word-Order Issues in {English}-to-{Urdu} Statistical Machine Translation},
- year = {2011},
- journal = {The Prague Bulletin of Mathematical Linguistics},
- number = {95},
- institution = {Univerzita Karlova},
- address = {Praha, Czechia},
- issn = {0032-6585},
- }
- """
-
- _ALL = "all"
- _VERSION = "1.0.0"
- _SOURCES = ["bible", "quran"]
- _SOURCES_FILEPATHS = {
-     s: {
-         "train": {"urdu": "train.ur", "english": "train.en"},
-         "dev": {"urdu": "dev.ur", "english": "dev.en"},
-         "test": {"urdu": "test.ur", "english": "test.en"},
-     }
-     for s in _SOURCES
- }
-
-
- class UM005Config(datasets.BuilderConfig):
-     def __init__(self, *args, sources=None, **kwargs):
-         super().__init__(*args, version=datasets.Version(_VERSION, ""), **kwargs)
-         self.sources = sources
-
-     @property
-     def language_pair(self):
-         return ("ur", "en")
-
-
- class UM005(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         UM005Config(name=source, sources=[source], description=f"Source: {source}.") for source in _SOURCES
-     ] + [
-         UM005Config(
-             name=_ALL,
-             sources=_SOURCES,
-             description="All sources included: bible, quran",
-         )
-     ]
-     BUILDER_CONFIG_CLASS = UM005Config
-     DEFAULT_CONFIG_NAME = _ALL
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "translation": datasets.Translation(languages=self.config.language_pair),
-                 },
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         path = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"datapath": path, "datatype": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"datapath": path, "datatype": "dev"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"datapath": path, "datatype": "test"},
-             ),
-         ]
-
-     def _generate_examples(self, datapath, datatype):
-         if datatype == "train":
-             ur_file = "train.ur"
-             en_file = "train.en"
-         elif datatype == "dev":
-             ur_file = "dev.ur"
-             en_file = "dev.en"
-         elif datatype == "test":
-             ur_file = "test.ur"
-             en_file = "test.en"
-         else:
-             raise Exception("Invalid dataype. Try one of: dev, train, test")
-
-         for source in self.config.sources:
-             urdu_path = os.path.join(datapath, source, ur_file)
-             english_path = os.path.join(datapath, source, en_file)
-             with open(urdu_path, encoding="utf-8") as u, open(english_path, encoding="utf-8") as e:
-                 for sentence_counter, (x, y) in enumerate(zip(u, e)):
-                     x = x.strip()
-                     y = y.strip()
-                     id_ = f"{source}-{sentence_counter}"
-                     yield id_, {
-                         "id": id_,
-                         "translation": {"ur": x, "en": y},
-                     }
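For reference, the deleted script paired the i-th Urdu line with the i-th English line of each source and emitted one translation record per pair; that is also the row layout now stored in the Parquet shards. A standalone sketch of that pairing logic, with hypothetical local paths for an extracted copy of umc005-corpus.zip:

import os


def generate_examples(datapath, source="bible", ur_file="train.ur", en_file="train.en"):
    # Yield records shaped like the deleted script's output: an id plus a ur/en pair.
    urdu_path = os.path.join(datapath, source, ur_file)
    english_path = os.path.join(datapath, source, en_file)
    with open(urdu_path, encoding="utf-8") as u, open(english_path, encoding="utf-8") as e:
        for i, (ur, en) in enumerate(zip(u, e)):
            yield {"id": f"{source}-{i}", "translation": {"ur": ur.strip(), "en": en.strip()}}


# Hypothetical usage against a local extraction of the corpus zip:
# for row in generate_examples("/path/to/umc005-corpus"):
#     print(row)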