albertvillanova HF staff committed
Commit 6ca1255
1 Parent(s): 6c0e9e6

Delete loading script

Files changed (1)
  1. um005.py +0 -145
um005.py DELETED
@@ -1,145 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- import os
-
- import datasets
-
-
- _DESCRIPTION = """\
- UMC005 English-Urdu is a parallel corpus of texts in English and Urdu language with sentence alignments. The corpus can be used for experiments with statistical machine translation.
-
- The texts come from four different sources:
- - Quran
- - Bible
- - Penn Treebank (Wall Street Journal)
- - Emille corpus
-
- The authors provide the religious texts of Quran and Bible for direct download. Because of licensing reasons, Penn and Emille texts cannot be redistributed freely. However, if you already hold a license for the original corpora, we are able to provide scripts that will recreate our data on your disk. Our modifications include but are not limited to the following:
-
- - Correction of Urdu translations and manual sentence alignment of the Emille texts.
- - Manually corrected sentence alignment of the other corpora.
- - Our data split (training-development-test) so that our published experiments can be reproduced.
- - Tokenization (optional, but needed to reproduce our experiments).
- - Normalization (optional) of e.g. European vs. Urdu numerals, European vs. Urdu punctuation, removal of Urdu diacritics.
- """
- _HOMEPAGE_URL = "https://ufal.mff.cuni.cz/umc/005-en-ur/"
- _URL = "https://ufal.mff.cuni.cz/umc/005-en-ur/download.php?f=umc005-corpus.zip"
- _CITATION = """\
- @unpublished{JaZeWordOrderIssues2011,
-     author = {Bushra Jawaid and Daniel Zeman},
-     title = {Word-Order Issues in {English}-to-{Urdu} Statistical Machine Translation},
-     year = {2011},
-     journal = {The Prague Bulletin of Mathematical Linguistics},
-     number = {95},
-     institution = {Univerzita Karlova},
-     address = {Praha, Czechia},
-     issn = {0032-6585},
- }
- """
-
- _ALL = "all"
- _VERSION = "1.0.0"
- _SOURCES = ["bible", "quran"]
- _SOURCES_FILEPATHS = {
-     s: {
-         "train": {"urdu": "train.ur", "english": "train.en"},
-         "dev": {"urdu": "dev.ur", "english": "dev.en"},
-         "test": {"urdu": "test.ur", "english": "test.en"},
-     }
-     for s in _SOURCES
- }
-
-
- class UM005Config(datasets.BuilderConfig):
-     def __init__(self, *args, sources=None, **kwargs):
-         super().__init__(*args, version=datasets.Version(_VERSION, ""), **kwargs)
-         self.sources = sources
-
-     @property
-     def language_pair(self):
-         return ("ur", "en")
-
-
- class UM005(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         UM005Config(name=source, sources=[source], description=f"Source: {source}.") for source in _SOURCES
-     ] + [
-         UM005Config(
-             name=_ALL,
-             sources=_SOURCES,
-             description="All sources included: bible, quran",
-         )
-     ]
-     BUILDER_CONFIG_CLASS = UM005Config
-     DEFAULT_CONFIG_NAME = _ALL
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "translation": datasets.Translation(languages=self.config.language_pair),
-                 },
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         path = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"datapath": path, "datatype": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"datapath": path, "datatype": "dev"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"datapath": path, "datatype": "test"},
-             ),
-         ]
-
-     def _generate_examples(self, datapath, datatype):
-         if datatype == "train":
-             ur_file = "train.ur"
-             en_file = "train.en"
-         elif datatype == "dev":
-             ur_file = "dev.ur"
-             en_file = "dev.en"
-         elif datatype == "test":
-             ur_file = "test.ur"
-             en_file = "test.en"
-         else:
-             raise Exception("Invalid datatype. Try one of: dev, train, test")
-
-         for source in self.config.sources:
-             urdu_path = os.path.join(datapath, source, ur_file)
-             english_path = os.path.join(datapath, source, en_file)
-             with open(urdu_path, encoding="utf-8") as u, open(english_path, encoding="utf-8") as e:
-                 for sentence_counter, (x, y) in enumerate(zip(u, e)):
-                     x = x.strip()
-                     y = y.strip()
-                     id_ = f"{source}-{sentence_counter}"
-                     yield id_, {
-                         "id": id_,
-                         "translation": {"ur": x, "en": y},
-                     }
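
With the loading script gone, `datasets` no longer executes `um005.py`; the dataset is instead read from data files hosted in the repository. A minimal usage sketch, assuming the repo id `um005` and the config names defined by the deleted script (`all`, `bible`, `quran`) carry over unchanged:

from datasets import load_dataset

# "all" was the default config in the deleted script; "bible" and "quran"
# each selected a single source. The repo id "um005" is an assumption here.
ds = load_dataset("um005", "quran")

# The script generated train/validation/test splits with this schema:
# {"id": "quran-0", "translation": {"ur": "...", "en": "..."}}
print(ds["train"][0])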