# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import os
import datasets
from tqdm import tqdm
import numpy as np
import ijson
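
# ijson lets parse_json() below stream the large corpus.json file rather than
# loading it into memory all at once.
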
_VERSION = "0.0.1"
_LANGUAGES = ['san', 'dji', 'eng', 'epo', 'cmn', 'cop', 'deu', 'hrv', 'kky', 'haw', 'bre', 'nld', 'bea', 'pon', 'pes', 'dif', 'heb', 'cha', 'kos', 'nay', 'hbo', 'arb', 'ita', 'rus', 'awk', 'arp', 'hat', 'swh', 'lat', 'bla', 'ces', 'spa', 'vie', 'grc', 'srp', 'fra', 'hlt', 'ukr', 'wro', 'ton', 'als', 'jpn']
_DESCRIPTION = "Bible Parallel Corpus"
_HOMEPAGE = 'https://github.com/BibleNLP/ebible-corpus'
_PAIR = 'all'
_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {A great new dataset},
    author = {huggingface, Inc.},
    year = {2020}
}
"""
_LICENSE = 'Creative Commons and Public Domain, specified in the dataset'
_FILES = {'corpus':'corpus.json','vref':'vref.txt'}
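# corpus.json holds the verse texts for every translation, keyed by language code;
# vref.txt lists the canonical verse references in order and is used to sort verse
# ranges in _generate_examples.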


class BiblenlpCorpusConfig(datasets.BuilderConfig):
    def __init__(self, languages=[], pair='all', **kwargs):
        '''
        languages: list of languages to include
        pair: 'all', 'range', or 'single' to specify whether verse ranges, single pairings, or all pairs are included
        **kwargs: additional arguments to pass to the superclass
        '''
        super(BiblenlpCorpusConfig, self).__init__(name="-".join(languages), **kwargs)
        self.languages = languages
        self.pair = pair


class BiblenlpCorpus(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        BiblenlpCorpusConfig(
            languages=_LANGUAGES,
            pair=_PAIR,
            description=f'Parallel Bible verses with {_PAIR} pairings of {"-".join(_LANGUAGES)} languages',
        )
    ]
    BUILDER_CONFIG_CLASS = BiblenlpCorpusConfig
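
    # Because BUILDER_CONFIG_CLASS is set, `languages` and `pair` can also be passed
    # as keyword arguments at load time to build a custom configuration (see the
    # usage sketch at the end of this file).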

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "translation": datasets.features.TranslationVariableLanguages(languages=_LANGUAGES),
                    "files": datasets.features.Sequence({'lang': datasets.Value("string"), 'file': datasets.Value("string")}),
                    "ref": datasets.features.Sequence(datasets.Value("string")),
                    "licenses": datasets.features.Sequence(datasets.Value("string")),
                    "copyrights": datasets.features.Sequence(datasets.Value("string")),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=_VERSION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_FILES)
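
        # Assumed layout of corpus.json, inferred from the parser below (a sketch,
        # not an authoritative schema): one top-level key per language code, each
        # holding a list of entries like
        #   {"verses": ["GEN 1:1"], "text": "...", "file": "<translation id>",
        #    "license": "...", "copyright": "..."}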
        def parse_json(json_filename, langs):
            with open(json_filename, 'rb') as input_file:
                # load json iteratively
                parser = ijson.parse(input_file)
                parsed_dict = {lang: [] for lang in langs}
                idx = {lang: -1 for lang in langs}
                for prefix, event, value in parser:
                    if any([prefix.startswith(f'{lang}.') for lang in langs]):
                        if event == 'start_map':
                            idx[prefix.split('.')[0]] += 1
                            tmpdict = {}
                            tmpdict['verses'] = []
                        if prefix.endswith('verses.item'):
                            tmpdict['verses'].append(value)
                        if prefix.endswith('copyright'):
                            tmpdict['copyright'] = value
                        if prefix.endswith('text'):
                            tmpdict['text'] = value
                        if prefix.endswith('file'):
                            tmpdict['file'] = value
                        if prefix.endswith('license'):
                            tmpdict['license'] = value
                        if event == 'end_map':
                            # write the dictionary
                            parsed_dict[prefix.split('.')[0]].append(tmpdict.copy())
            return parsed_dict
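
        # define_splits groups verses that are linked through multi-verse ranges so
        # that a whole group always lands in the same split. It returns:
        #   vgroups     - list of sets of verse references that belong together
        #   vindexes    - per language, a map from verse reference to corpus row indexes
        #   verse2split - map from verse reference to 'train', 'dev', or 'test'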
        def define_splits(corpus_slice, langs):
            verse2split = {}
            langtexts = {}
            textverses = {}
            vindexes = {}
            vgroups = []
            versesets = {}
            np.random.seed(42)
            for lang in tqdm(langs):
                langtexts[lang] = set()
                textverses[lang] = {}
                vindexes[lang] = {}
                for idx, line in enumerate(corpus_slice[lang]):
                    langtexts[lang].add(line['file'])
                    for v in line['verses']:
                        if not textverses[lang].get(line['file']):
                            textverses[lang][line['file']] = set()
                        textverses[lang][line['file']].add(v)
                        if not vindexes[lang].get(v):
                            vindexes[lang][v] = [idx]
                        else:
                            vindexes[lang][v].append(idx)
            for line in tqdm(corpus_slice[langs[0]]):
                versesets = {line['file']: line['verses']}
                if any([verse2split.get(z) for z in line['verses']]):
                    for verse in line['verses']:
                        if verse2split.get(verse):
                            prevsplit = verse2split.get(verse)
                            break
                    split = prevsplit
                else:
                    split = np.random.choice(['train', 'test', 'dev'], p=[.9, .05, .05])
                if not all([verse2split.get(z) for z in line['verses']]):
                    all_verses = set()
                    for v in line['verses']:
                        all_verses.add(v)
                    while True:
                        verses_added = False
                        idxes = {k: set() for k in langs}
                        # get indexes for verses
                        for v in all_verses:
                            for lang in langs:
                                if vindexes[lang].get(v):
                                    for idx in vindexes[lang][v]:
                                        idxes[lang].add(idx)
                        for lang in langs:
                            for compline in [corpus_slice[lang][x] for x in idxes[lang] if x != set()]:
                                if all(x in textverses[lang][compline['file']] for x in all_verses) and any([x in list(all_verses) for x in compline['verses']]):
                                    if not versesets.get(compline['file']):
                                        versesets[compline['file']] = compline['verses'].copy()
                                    else:
                                        versesets[compline['file']].extend(compline['verses'].copy())
                                    for v in compline['verses']:
                                        pre_size = len(all_verses)
                                        all_verses.add(v)
                                        if len(all_verses) > pre_size:
                                            verses_added = True
                        if not verses_added or all([set(versesets[q]) == all_verses for q in versesets.keys()]):
                            vgroups.append(all_verses)
                            for v in all_verses:
                                verse2split[v] = split
                            break
            return vgroups, vindexes, verse2split

        corpus_slice = parse_json(downloaded_files['corpus'], self.config.languages)
        vgroups, vindexes, verse2split = define_splits(corpus_slice, self.config.languages)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'langs': self.config.languages,
                    'corpus': corpus_slice,
                    'vgroups': vgroups,
                    'vindexes': vindexes,
                    'v2split': verse2split,
                    'split': 'train',
                    'vref': downloaded_files['vref'],
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    'langs': self.config.languages,
                    'corpus': corpus_slice,
                    'vgroups': vgroups,
                    'vindexes': vindexes,
                    'v2split': verse2split,
                    'split': 'dev',
                    'vref': downloaded_files['vref'],
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    'langs': self.config.languages,
                    'corpus': corpus_slice,
                    'vgroups': vgroups,
                    'vindexes': vindexes,
                    'v2split': verse2split,
                    'split': 'test',
                    'vref': downloaded_files['vref'],
                }
            ),
        ]
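
    # Everything in gen_kwargs above is forwarded verbatim to _generate_examples below.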

    def _generate_examples(self, vref, corpus, vgroups, vindexes, v2split, split, langs):
        # vref.txt lists the canonical verse references in order; map each reference
        # to its line number so verse groups can be sorted back into canonical order.
        with open(vref, 'r') as txtfile:
            lines = txtfile.readlines()
        verse2index = {k.strip(): v for v, k in enumerate(lines) if k.strip() != ''}

        def order_verses(verse_list):
            lines_list = [int(verse2index[x]) for x in verse_list]
            verse_order = np.argsort(lines_list)
            return verse_order

        trans_license = {}
        trans_copyright = {}
        id = -1
        # Each group produced by define_splits is a set of verse references that must
        # all land in the same split.
        for group in vgroups:
            if v2split.get(list(group)[0]) == split:
                if len(group) > 1:
                    v_order = order_verses(group)
                    o_group = list(np.array(list(group))[v_order])
                else:
                    o_group = list(group)
                trans_dict = {k: {} for k in langs}
                trans_texts = {k: {} for k in langs}
                used_idxes = {k: [] for k in langs}
                for v in o_group:
                    single_texts = {k: {} for k in langs}
                    single_dict = {k: {} for k in langs}
                    for lang in langs:
                        if vindexes[lang].get(v):
                            try:
                                for i in range(0, len(list(vindexes[lang][v]))):
                                    if list(vindexes[lang][v])[i] not in used_idxes[lang]:
                                        # record the individual row index so the same row is not reused
                                        used_idxes[lang].append(vindexes[lang][v][i])
                                        line = corpus[lang][vindexes[lang][v][i]]
                                        # check the per-language dict so repeated files are concatenated
                                        if not trans_texts[lang].get(line['file']):
                                            trans_texts[lang][line['file']] = line['text']
                                        else:
                                            trans_texts[lang][line['file']] += f' {line["text"]}'
                                        if line.get('license'):
                                            trans_license[line['file']] = line['license']
                                        if line.get('copyright'):
                                            trans_copyright[line['file']] = line['copyright']
                                        if len(line['verses']) == 1:
                                            single_texts[lang][line['file']] = line['text']
                            except:
                                print(lang, v)
                                raise
                        single_dict[lang] = list(single_texts[lang].values())
                        trans_dict[lang] = list(trans_texts[lang].values())
                    single_file = {x: list(single_texts[x].keys()) for x in langs}
                    single_lic_list = []
                    single_copy_list = []
                    for lang in langs:
                        single_lic_list.extend([trans_license.get(x, '') for x in single_file[lang]])
                        single_copy_list.extend([trans_copyright.get(x, '') for x in single_file[lang]])
                    # emit a single-verse example unless only verse ranges were requested
                    if all([single_dict.get(x) and single_dict.get(x) != [{}] and list(single_dict.get(x)) for x in langs]) and len(list(single_dict.keys())) == len(langs) and self.config.pair != 'range':
                        id = id + 1
                        sfile_list = []
                        for key in langs:
                            for value in single_file.get(key):
                                sfile_list.append({'lang': key, 'file': value})
                        try:
                            yield id, {'translation': single_dict, 'files': sfile_list, 'ref': [v], 'licenses': single_lic_list, 'copyrights': single_copy_list}
                        except:
                            print(id, single_dict, sfile_list, v, single_lic_list, single_copy_list)
                            raise
                file_list = {x: list(trans_texts[x].keys()) for x in langs}
                license_list = []
                copyright_list = []
                for lang in langs:
                    license_list.extend([trans_license.get(x, '') for x in file_list[lang]])
                    copyright_list.extend([trans_copyright.get(x, '') for x in file_list[lang]])
                # emit the multi-verse (range) example unless only single verses were requested
                if len(o_group) > 1 and all([trans_dict.get(x) and trans_dict.get(x) != [{}] and list(trans_dict.get(x)) for x in langs]) and len(list(trans_dict.keys())) == len(langs) and self.config.pair != 'single':
                    id = id + 1
                    ofile_list = []
                    for key in langs:
                        for value in file_list.get(key):
                            ofile_list.append({'lang': key, 'file': value})
                    try:
                        yield id, {'translation': trans_dict, 'files': ofile_list, 'ref': o_group, 'licenses': license_list, 'copyrights': copyright_list}
                    except:
                        print(id, trans_dict, ofile_list, o_group, license_list, copyright_list)
                        raise
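

# Example usage (a minimal sketch, not part of the loader itself; it assumes this
# file is used as a local `datasets` loading script and that corpus.json and
# vref.txt are reachable at the relative paths given in _FILES):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/biblenlp-corpus.py", languages=["eng", "fra"], pair="all")
#     print(ds["train"][0]["translation"])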