import glob
import os

from datasets import load_dataset

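# Wikipedia language codes to download (the 24 official EU languages, ISO 639-1).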
_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
# ERROR_LANGS = ['et', 'ga']  # languages that previously caused errors; currently none are excluded
ERROR_LANGS = []
FINAL_LANGS = [l for l in _LANGUAGES if l not in ERROR_LANGS]
date = "20221120"  # Wikipedia dump date (YYYYMMDD)

base_dir = 'data'
date_dir = os.path.join(base_dir, date)
os.makedirs(date_dir, exist_ok=True)

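# Maximum size of a single output file, in GB; larger datasets are split into shards.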
max_file_size = 4


def process_language(LANG):
    print(f'Processing language {LANG}...')

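    # Load the Wikipedia dump for this language and date via the olm/wikipedia dataset builder.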
    dataset = load_dataset("olm/wikipedia", language=LANG, date=date, split='train')
    size_in_gb = dataset.size_in_bytes / 1e9
    print(f'Found {size_in_gb} GB of data ({len(dataset)} documents) for language {LANG}...')
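    # Languages larger than max_file_size GB are written as multiple shards; smaller ones as a single file.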
    if size_in_gb > max_file_size:
        num_shards = int(size_in_gb / max_file_size) + 1
        for shard in range(num_shards):
            dataset.shard(num_shards, shard).to_json(f'{date_dir}/{LANG}_{shard}.jsonl', lines=True)
    else:
        dataset.to_json(f'{date_dir}/{LANG}_0.jsonl', lines=True)


if __name__ == '__main__':
    """
    Run with
    export PYTHONPATH=. && python prepare_wikipedias.py | tee prepare_wikipedias.log
    """

    for LANG in FINAL_LANGS:
        process_language(LANG)

    print(f"Compressing datasets at {date_dir}")

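    # Compress each shard with xz (-z compress, -k keep the original .jsonl, -f force overwrite, -T0 use all cores).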
    for path in glob.glob(os.path.join(date_dir, '*.jsonl')):
        os.system(f'xz -zkf -T0 {path}')