DCNemesis committed
Commit 91a2b77
1 Parent(s): f470f82

create repo

Files changed (5)
  1. .gitattributes +1 -0
  2. README.md +94 -0
  3. biblenlp-corpus.py +334 -0
  4. corpus.json +3 -0
  5. vref.txt +0 -0
.gitattributes CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ corpus.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,94 @@
+ ---
+ annotations_creators:
+ - no-annotation
+ language_creators:
+ - expert-generated
+ languages:
+ - san
+ - dji
+ - eng
+ - epo
+ - cmn
+ - cop
+ - deu
+ - hrv
+ - kky
+ - haw
+ - bre
+ - nld
+ - bea
+ - pon
+ - pes
+ - dif
+ - heb
+ - cha
+ - kos
+ - nay
+ - hbo
+ - arb
+ - ita
+ - rus
+ - awk
+ - arp
+ - hat
+ - swh
+ - lat
+ - bla
+ - ces
+ - spa
+ - vie
+ - grc
+ - srp
+ - fra
+ - hlt
+ - ukr
+ - wro
+ - ton
+ - als
+ - jpn
+ licenses:
+ - cc-by-4.0
+ - other-public-domain
+ multilinguality:
+ - translation
+ pretty_name: biblenlp-corpus
+ size_categories:
+ - 1M<n<10M
+ source_datasets:
+ - original
+ task_categories:
+ - translation
+ task_ids: []
+ ---
+
+ # Dataset Card for BibleNLP Corpus
+
+ ### Dataset Summary
+ Partial and complete Bible translations in 42 languages, aligned by verse.
+
+ ### Languages
+ als, arb, arp, awk, bea, bla, bre, ces, cha, cmn, cop, deu, dif, dji, eng, epo, fra, grc, hat, haw, hbo, heb, hlt, hrv, ita, jpn, kky, kos, lat, nay, nld, pes, pon, rus, san, spa, srp, swh, ton, ukr, vie, wro
+
+ ## Dataset Structure
+
+ ### Data Fields
+
+ **translation**
+ - **languages** - an N-length list of the languages of the translations, sorted alphabetically
+ - **translation** - an N-length list of the translations, each corresponding to the language at the same position in the field above
+
+ **files**
+ - **lang** - an N-length list of the languages of the files, in order of input
+ - **file** - an N-length list of the filenames from the corpus on GitHub, each corresponding to the language at the same position in **lang**
+
+ **ref** - the verse(s) contained in the record, as a list, each written as `<three-letter book code> <chapter number>:<verse number>`
+
+ **licenses** - an N-length list of licenses, corresponding to the list of files above
+
+ **copyrights** - information on copyright holders, corresponding to the list of files above (a sample record is shown below)
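+
+ A record might look like the following (a minimal sketch; the filenames and texts are illustrative, not actual corpus contents):
+
+ ```json
+ {
+     "translation": {
+         "languages": ["eng", "fra"],
+         "translation": ["In the beginning God created the heavens and the earth.",
+                         "Au commencement, Dieu créa les cieux et la terre."]
+     },
+     "files": [{"lang": "eng", "file": "eng-example.txt"},
+               {"lang": "fra", "file": "fra-example.txt"}],
+     "ref": ["GEN 1:1"],
+     "licenses": ["CC BY 4.0", "public domain"],
+     "copyrights": ["", ""]
+ }
+ ```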
+
+ ### Usage
+
+ The dataset loading script requires tqdm, ijson, and numpy to be installed.
+
+ Specify the languages to be paired as a list of ISO 639-3 language codes, such as ``languages = ['eng', 'fra']``.
+ By default, the script returns individual verse pairs as well as verses covering a full range. If only the individual verses are desired, use ``pair='single'``. If only the maximum-range pairing is desired, use ``pair='range'`` (for example, if one text uses the verse range covering GEN 1:1-3, all texts would return only the full-length pairing). A loading example follows.
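+
+ A minimal loading sketch (the repository path below is an assumption; substitute the actual path of this dataset):
+
+ ```python
+ from datasets import load_dataset
+
+ # 'bible-nlp/biblenlp-corpus' is a hypothetical repository id
+ dataset = load_dataset('bible-nlp/biblenlp-corpus',
+                        languages=['eng', 'fra'],  # ISO 639-3 codes to pair
+                        pair='all')                # 'all', 'single', or 'range'
+ print(dataset['train'][0]['translation'])
+ ```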
+
+ ## Sources
+ https://github.com/BibleNLP/ebible-corpus
biblenlp-corpus.py ADDED
@@ -0,0 +1,334 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ import datasets
+ from tqdm import tqdm
+ import numpy as np
+ import ijson
+
+ _VERSION = "0.0.1"
+ _LANGUAGES = ['san', 'dji', 'eng', 'epo', 'cmn', 'cop', 'deu', 'hrv', 'kky', 'haw', 'bre', 'nld', 'bea', 'pon', 'pes', 'dif', 'heb', 'cha', 'kos', 'nay', 'hbo', 'arb', 'ita', 'rus', 'awk', 'arp', 'hat', 'swh', 'lat', 'bla', 'ces', 'spa', 'vie', 'grc', 'srp', 'fra', 'hlt', 'ukr', 'wro', 'ton', 'als', 'jpn']
+ _DESCRIPTION = "Bible Parallel Corpus"
+ _HOMEPAGE = 'https://github.com/BibleNLP/ebible-corpus'
+ _PAIR = 'all'
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+ _LICENSE = 'Creative Commons and Public Domain, specified in the dataset'
+ _FILES = {'corpus': 'corpus.json', 'vref': 'vref.txt'}
+
+ class BiblenlpCorpusConfig(datasets.BuilderConfig):
+     def __init__(self, languages=None, pair='all', **kwargs):
+         '''
+         languages: list of languages to include
+         pair: 'all', 'range', or 'single' to specify whether verse ranges, single pairings, or all pairs are included
+         **kwargs: additional arguments to pass to the superclass'''
+         languages = languages if languages is not None else []  # avoid a mutable default argument
+         super(BiblenlpCorpusConfig, self).__init__(name='-'.join(languages), **kwargs)
+         self.languages = languages
+         self.pair = pair
+
+ class BiblenlpCorpus(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         BiblenlpCorpusConfig(
+             languages=_LANGUAGES,
+             pair=_PAIR,
+             description=f'Parallel Bible verses with {_PAIR} pairings of {"-".join(_LANGUAGES)} languages',
+         )
+     ]
+
+     BUILDER_CONFIG_CLASS = BiblenlpCorpusConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "translation": datasets.features.TranslationVariableLanguages(languages=_LANGUAGES),
+                     "files": datasets.features.Sequence({'lang': datasets.Value("string"), 'file': datasets.Value("string")}),
+                     "ref": datasets.features.Sequence(datasets.Value("string")),
+                     "licenses": datasets.features.Sequence(datasets.Value("string")),
+                     "copyrights": datasets.features.Sequence(datasets.Value("string"))
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+             version=_VERSION
+         )
+
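+     # _split_generators downloads corpus.json and vref.txt, parses only the
+     # requested languages out of the corpus, groups overlapping verse ranges,
+     # assigns each group to a train/dev/test split, and passes the same parsed
+     # structures to all three SplitGenerators.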
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download(_FILES)
+
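+         # parse_json streams corpus.json with ijson rather than json.load, so
+         # the large file never has to be materialized in memory at once. Each
+         # top-level key is a language code mapping to a list of records with
+         # 'verses', 'text', 'file', 'license', and 'copyright' fields; only
+         # the requested languages are collected.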
+         def parse_json(json_filename, langs):
+             with open(json_filename, 'rb') as input_file:
+                 # load json iteratively
+                 parser = ijson.parse(input_file)
+                 parsed_dict = {lang: [] for lang in langs}
+                 idx = {lang: -1 for lang in langs}
+                 for prefix, event, value in parser:
+                     if any(prefix.startswith(f'{lang}.') for lang in langs):
+                         if event == 'start_map':
+                             idx[prefix.split('.')[0]] += 1
+                             tmpdict = {}
+                             tmpdict['verses'] = []
+                         if prefix.endswith('verses.item'):
+                             tmpdict['verses'].append(value)
+                         if prefix.endswith('copyright'):
+                             tmpdict['copyright'] = value
+                         if prefix.endswith('text'):
+                             tmpdict['text'] = value
+                         if prefix.endswith('file'):
+                             tmpdict['file'] = value
+                         if prefix.endswith('license'):
+                             tmpdict['license'] = value
+                         if event == 'end_map':
+                             # write the completed record for this language
+                             parsed_dict[prefix.split('.')[0]].append(tmpdict.copy())
+             return parsed_dict
+
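+         # define_splits assigns every verse to train/dev/test (90/5/5, with a
+         # fixed seed for reproducibility). Because one translation may cover a
+         # verse range (e.g. GEN 1:1-3) that another splits into single verses,
+         # overlapping records are merged into verse groups and each whole
+         # group gets a single split, so verses cannot leak across splits.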
+         def define_splits(corpus_slice, langs):
+             verse2split = {}
+             langtexts = {}
+             textverses = {}
+             vindexes = {}
+             vgroups = []
+             versesets = {}
+
+             np.random.seed(42)
+
+             for lang in tqdm(langs):
+                 langtexts[lang] = set()
+                 textverses[lang] = {}
+                 vindexes[lang] = {}
+                 for idx, line in enumerate(corpus_slice[lang]):
+                     langtexts[lang].add(line['file'])
+                     for v in line['verses']:
+                         if not textverses[lang].get(line['file']):
+                             textverses[lang][line['file']] = set()
+                         textverses[lang][line['file']].add(v)
+                         if not vindexes[lang].get(v):
+                             vindexes[lang][v] = [idx]
+                         else:
+                             vindexes[lang][v].append(idx)
+
+             for line in tqdm(corpus_slice[langs[0]]):
+                 versesets = {line['file']: line['verses']}
+                 if any(verse2split.get(z) for z in line['verses']):
+                     for verse in line['verses']:
+                         if verse2split.get(verse):
+                             prevsplit = verse2split.get(verse)
+                             break
+                     split = prevsplit
+                 else:
+                     split = np.random.choice(['train', 'test', 'dev'], p=[.9, .05, .05])
+                 if not all(verse2split.get(z) for z in line['verses']):
+                     all_verses = set()
+                     for v in line['verses']:
+                         all_verses.add(v)
+                     while True:
+                         verses_added = False
+                         idxes = {k: set() for k in langs}
+                         # get indexes for verses
+                         for v in all_verses:
+                             for lang in langs:
+                                 if vindexes[lang].get(v):
+                                     for idx in vindexes[lang][v]:
+                                         idxes[lang].add(idx)
+
+                         for lang in langs:
+                             for compline in [corpus_slice[lang][x] for x in idxes[lang]]:
+                                 if all(x in textverses[lang][compline['file']] for x in all_verses) and any(x in all_verses for x in compline['verses']):
+                                     if not versesets.get(compline['file']):
+                                         versesets[compline['file']] = compline['verses'].copy()
+                                     else:
+                                         versesets[compline['file']].extend(compline['verses'].copy())
+                                     for v in compline['verses']:
+                                         pre_size = len(all_verses)
+                                         all_verses.add(v)
+                                         if len(all_verses) > pre_size:
+                                             verses_added = True
+
+                         if not verses_added or all(set(versesets[q]) == all_verses for q in versesets.keys()):
+                             vgroups.append(all_verses)
+                             for v in all_verses:
+                                 verse2split[v] = split
+                             break
+
+             return vgroups, vindexes, verse2split
+
+         corpus_slice = parse_json(downloaded_files['corpus'], self.config.languages)
+         vgroups, vindexes, verse2split = define_splits(corpus_slice, self.config.languages)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     'langs': self.config.languages,
+                     'corpus': corpus_slice,
+                     'vgroups': vgroups,
+                     'vindexes': vindexes,
+                     'v2split': verse2split,
+                     'split': 'train',
+                     'vref': downloaded_files['vref'],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     'langs': self.config.languages,
+                     'corpus': corpus_slice,
+                     'vgroups': vgroups,
+                     'vindexes': vindexes,
+                     'v2split': verse2split,
+                     'split': 'dev',
+                     'vref': downloaded_files['vref'],
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     'langs': self.config.languages,
+                     'corpus': corpus_slice,
+                     'vgroups': vgroups,
+                     'vindexes': vindexes,
+                     'v2split': verse2split,
+                     'split': 'test',
+                     'vref': downloaded_files['vref'],
+                 }
+             ),
+         ]
+
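+     # _generate_examples orders each verse group canonically (vref.txt lists
+     # one verse reference per line, in canonical order, so a reference's line
+     # number doubles as its sort key) and, for each group in the requested
+     # split, yields single-verse examples and/or one merged range example,
+     # depending on the 'pair' option.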
+     def _generate_examples(self, vref, corpus, vgroups, vindexes, v2split, split, langs):
+         with open(vref, 'r') as txtfile:
+             lines = txtfile.readlines()
+             verse2index = {k.strip(): v for v, k in enumerate(lines) if k.strip() != ''}
+
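+         # Verse references sort by their position in vref.txt, not lexically;
+         # order_verses maps a group of references to that canonical order.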
+         def order_verses(verse_list):
+             lines_list = [int(verse2index[x]) for x in verse_list]
+             verse_order = np.argsort(lines_list)
+             return verse_order
+
+         trans_license = {}
+         trans_copyright = {}
+         id = -1
+         for group in vgroups:
+             if v2split.get(list(group)[0]) == split:
+                 if len(group) > 1:
+                     v_order = order_verses(group)
+                     o_group = list(np.array(list(group))[v_order])
+                 else:
+                     o_group = list(group)
+                 trans_dict = {k: {} for k in langs}
+                 trans_texts = {k: {} for k in langs}
+                 used_idxes = {k: [] for k in langs}
+                 for v in o_group:
+                     single_texts = {k: {} for k in langs}
+                     single_dict = {k: {} for k in langs}
+                     for lang in langs:
+                         if vindexes[lang].get(v):
+                             try:
+                                 for i in range(len(vindexes[lang][v])):
+                                     if vindexes[lang][v][i] not in used_idxes[lang]:
+                                         # record the index itself (not the whole list)
+                                         # so already-used records are actually skipped
+                                         used_idxes[lang].append(vindexes[lang][v][i])
+                                         line = corpus[lang][vindexes[lang][v][i]]
+                                         # look up the file within this language's dict
+                                         if not trans_texts[lang].get(line['file']):
+                                             trans_texts[lang][line['file']] = line['text']
+                                         else:
+                                             trans_texts[lang][line['file']] += f' {line["text"]}'
+                                         if line.get('license'):
+                                             trans_license[line['file']] = line['license']
+                                         if line.get('copyright'):
+                                             trans_copyright[line['file']] = line['copyright']
+                                         if len(line['verses']) == 1:
+                                             single_texts[lang][line['file']] = line['text']
+                             except:
+                                 print(lang, v)
+                                 raise
+
+                         single_dict[lang] = list(single_texts[lang].values())
+                         trans_dict[lang] = list(trans_texts[lang].values())
+
+                     single_file = {x: list(single_texts[x].keys()) for x in langs}
+                     single_lic_list = []
+                     single_copy_list = []
+                     for lang in langs:
+                         single_lic_list.extend([trans_license.get(x, '') for x in single_file[lang]])
+                         single_copy_list.extend([trans_copyright.get(x, '') for x in single_file[lang]])
+                     if all(single_dict.get(x) and single_dict.get(x) != [{}] for x in langs) and len(single_dict) == len(langs) and self.config.pair != 'range':
+                         id = id + 1
+                         sfile_list = []
+                         for key in langs:
+                             for value in single_file.get(key):
+                                 sfile_list.append({'lang': key, 'file': value})
+
+                         try:
+                             yield id, {'translation': single_dict, 'files': sfile_list, 'ref': [v], 'licenses': single_lic_list, 'copyrights': single_copy_list}
+                         except:
+                             print(id, single_dict, sfile_list, v, single_lic_list, single_copy_list)
+                             raise
+
+                 file_list = {x: list(trans_texts[x].keys()) for x in langs}
+                 license_list = []
+                 copyright_list = []
+                 for lang in langs:
+                     license_list.extend([trans_license.get(x, '') for x in file_list[lang]])
+                     copyright_list.extend([trans_copyright.get(x, '') for x in file_list[lang]])
+                 if len(o_group) > 1 and all(trans_dict.get(x) and trans_dict.get(x) != [{}] for x in langs) and len(trans_dict) == len(langs) and self.config.pair != 'single':
+                     id = id + 1
+                     ofile_list = []
+                     for key in langs:
+                         for value in file_list.get(key):
+                             ofile_list.append({'lang': key, 'file': value})
+                     try:
+                         yield id, {'translation': trans_dict, 'files': ofile_list, 'ref': o_group, 'licenses': license_list, 'copyrights': copyright_list}
+                     except:
+                         print(id, trans_dict, ofile_list, o_group, license_list, copyright_list)
+                         raise
corpus.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a9add6b5fad8c0c77882189f6f28bd886436bd8e2aa4e89510a163c3a44cc1d
+ size 726783617
vref.txt ADDED
The diff for this file is too large to render.