#!/bin/bash
#SBATCH --ntasks=1               # number of MP tasks
#SBATCH --nodes=1
#SBATCH --cpus-per-task=64       # number of cores per tasks
#SBATCH --hint=nomultithread     # we get physical cores not logical
#SBATCH --time=20:00:00          # maximum execution time (HH:MM:SS)
#SBATCH --output=%x-%j.out       # output file name
#SBATCH --account=project_462000119
#SBATCH --partition=small

# Merge, deduplicate, shuffle and tokenize per-language xP3 jsonl shards
# into Megatron-DeepSpeed mmap datasets (separate "inputs"/"targets" keys).

set -x -e

#source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
source /scratch/project_462000119/muennighoff/nov-2022-bettercom/venv/bin/activate
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1

#MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
MEGATRON_DEEPSPEED_REPO=/scratch/project_462000119/muennighoff/nov-2022-mtf/Megatron-DeepSpeed
TOKENIZER_PATH="bigscience/tokenizer"

LANGS=(
ak
ar
as
bm
bn
ca
code
en
es
eu
fon
fr
gu
hi
id
ig
ki
kn
lg
ln
ml
mr
ne
nso
ny
or
pa
pt
rn
rw
sn
st
sw
ta
te
tn
ts
tum
tw
ur
vi
wo
xh
yo
zh
zu
)
LANGS=(
ru
)

#DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3cappedmixednewcodelong
#OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3cappedmixednewcodelong
DATA_PATH=/scratch/project_462000119/muennighoff/nov-2022-mtf/xp3ru/ru
OUTPUT=/scratch/project_462000119/muennighoff/nov-2022-mtf/xp3rumegds
mkdir -p "$OUTPUT"

# Iterate over the actual indices of LANGS rather than a hard-coded {0..1}:
# LANGS currently holds a single entry ("ru"), so a fixed two-pass loop would
# run a second time with an empty language code, producing merged_dups_.jsonl
# and a spurious extra tokenization pass.
# The loop variable is deliberately NOT named LANG: LANG is the standard
# locale variable and is usually already exported, so assigning it would
# change the collation/locale seen by sort/shuf/python.
for idx in "${!LANGS[@]}"; do
  LANG_CODE=${LANGS[$idx]}
  cd "$DATA_PATH"
  # Merge all shards for this language into one file.
  # NOTE(review): on a re-run, *.jsonl also matches the merged_* files
  # produced by a previous pass — clean the directory first; verify.
  cat *.jsonl > "merged_dups_$LANG_CODE.jsonl"
  # Drop duplicates (~1G / 37G for en) + Shuffle
  sort -u "merged_dups_$LANG_CODE.jsonl" | shuf > "merged_$LANG_CODE.jsonl"
  cd "$MEGATRON_DEEPSPEED_REPO"
  # Tokenize the "inputs" field (no EOD token appended).
  python tools/preprocess_data.py \
      --input "$DATA_PATH/merged_$LANG_CODE.jsonl" \
      --output-prefix "$OUTPUT/xp3_$LANG_CODE" \
      --dataset-impl mmap \
      --json-key inputs \
      --tokenizer-type PretrainedFromHF \
      --tokenizer-name-or-path "$TOKENIZER_PATH" \
      --workers 60
  # Tokenize the "targets" field; append EOD and prepend a space so that
  # inputs+targets concatenate cleanly at training time.
  python tools/preprocess_data.py \
      --input "$DATA_PATH/merged_$LANG_CODE.jsonl" \
      --output-prefix "$OUTPUT/xp3_$LANG_CODE" \
      --dataset-impl mmap \
      --json-key targets \
      --tokenizer-type PretrainedFromHF \
      --tokenizer-name-or-path "$TOKENIZER_PATH" \
      --append-eod \
      --prepend-space \
      --workers 60
done