# EU_Wikipedias / prepare_wikipedias.py
import glob
import os
from datasets import load_dataset
_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
# ERROR_LANGS = ['et', 'ga']  # 20221101: ga somehow hangs at sample 36715/57711; for et no data is downloaded
ERROR_LANGS = []  # 20221120: no errors for this dump
FINAL_LANGS = [l for l in _LANGUAGES if l not in ERROR_LANGS]
date = "20221120"  # dumps are available for the 1st and 20th of each month; edit this to generate newer data
base_dir = 'data'
date_dir = os.path.join(base_dir, date)
os.makedirs(date_dir, exist_ok=True)
max_file_size = 4  # maximum size of a single output shard, in GB
def process_language(LANG):
    print(f'Processing language {LANG}...')
    # streaming does not work here, so the full split is downloaded
    dataset = load_dataset("olm/wikipedia", language=LANG, date=date, split='train')
    size_in_gb = dataset.size_in_bytes / 1e9
    print(f'Found {size_in_gb} GB of data ({len(dataset)} documents) for language {LANG}...')
    if size_in_gb > max_file_size:
        # split large languages into multiple shards so that no single file exceeds max_file_size
        num_shards = int(size_in_gb / max_file_size) + 1
        for shard in range(num_shards):
            dataset.shard(num_shards, shard).to_json(f'{date_dir}/{LANG}_{shard}.jsonl', lines=True)
    else:
        dataset.to_json(f'{date_dir}/{LANG}_0.jsonl', lines=True)
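
# Illustrative sanity check (assumption, not part of the original script): a written
# shard can be read back with the generic 'json' loader, e.g. for the first German shard:
#   sample = load_dataset('json', data_files=f'{date_dir}/de_0.jsonl', split='train')
#   print(sample[0]['title'])  # assumes the usual Wikipedia schema (id, url, title, text)
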
if __name__ == '__main__':
    """
    Run with
    export PYTHONPATH=. && python prepare_wikipedias.py | tee prepare_wikipedias.log
    """
    # processing the languages in parallel does not work, so run them sequentially
    for LANG in FINAL_LANGS:
        process_language(LANG)
    # Compress datasets
    print(f"Compressing datasets at {date_dir}")
    # Do this at the end because xz -T0 uses multithreading
    for path in glob.glob(os.path.join(date_dir, '*.jsonl')):
        os.system(f'xz -zkf -T0 {path}')  # -T0 to use multithreading
        # os.system(f'rm {path}')  # remove the uncompressed file to save space
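
    # Note (assumption, not part of the original script): the compressed shards can be
    # restored with `xz -dk <file>.jsonl.xz`, or loaded directly via
    # load_dataset('json', data_files=...), which also handles xz-compressed files.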