#!/bin/bash
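
# Tokenize a JSONL dataset into Megatron's binary training format using the
# repo's tools/preprocess_data.py and a SentencePiece tokenizer model.
#
# Example invocation (paths are illustrative):
#   ./script.sh --dataset_json=/path/to/data.jsonl --dataset_bin=/path/to/output_dir

# Default paths; each can be overridden with the matching --flag below.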
dataset_json="/mnt/scratch-artemis/kshitij/oneB_experiment/new_data_wout_covost/combined/to_tokenize.jsonl"
dataset_bin="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b"
vocab_file="/mnt/scratch-artemis/kshitij/LLAMA/Megatron_LLM/temp/new_tokenizer/tokenizer.model"
repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron"
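
# Parse command-line overrides of the form --name=value; --help prints usage.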
for arg in "$@"
do
    case "$arg" in
        --help)
            echo "Usage: ./script.sh [OPTIONS]"
            echo "Options:"
            echo "  --dataset_json=PATH  Path to the dataset JSONL file."
            echo "  --dataset_bin=PATH   Directory in which to save the preprocessed data."
            echo "  --vocab_file=PATH    Path to the tokenizer.model file of the HF model to be trained."
            echo "  --repo=PATH          Path to the multilinguality_megatron repo."
            exit 0
            ;;
        --dataset_json=*)
            dataset_json="${arg#*=}"
            ;;
        --dataset_bin=*)
            dataset_bin="${arg#*=}"
            ;;
        --vocab_file=*)
            vocab_file="${arg#*=}"
            ;;
        --repo=*)
            repo="${arg#*=}"
            ;;
    esac
done

echo "$repo"
mkdir -p "$dataset_bin"
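
# Tokenize the JSONL input with the SentencePiece model and write the
# preprocessed dataset under $dataset_bin with output prefix "data".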
python "$repo/tools/preprocess_data.py" \
    --input="$dataset_json" \
    --output_prefix="$dataset_bin/data" \
    --tokenizer_type=SentencePieceTokenizer \
    --vocab_file="$vocab_file" \
    --chunk_size=64 \
    --workers=64 \
    --append_eod \
    --vocab_extra_ids 5000