#!/bin/bash
# Shard a merged Megatron checkpoint into tensor-/pipeline-parallel pieces
# using the checkpoint_util.py converter from the multilinguality_megatron repo.

# Default paths and settings (override via the command-line options below).
megatron_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/megatron_model"
sharded_model="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/spgi_vox_mls_text_1b/shards"
tp="2"
pp="1"
repo="/mnt/scratch-artemis/kshitij/LLAMA/latest_megatron_codebase/multilinguality_megatron"
vocab_size="37005"
# Parse command-line arguments.
# Note: `shift` is unnecessary (and misleading) inside `for arg in "$@"`,
# since the argument list is expanded before the loop starts, so it is omitted.
for arg in "$@"; do
    case $arg in
        --help)
            echo "Usage: ./script.sh [OPTIONS]"
            echo "Options:"
            echo "  --megatron_model=PATH  Path to the merged (unsharded) Megatron model to load."
            echo "  --sharded_model=PATH   Path to save the sharded model to."
            echo "  --tp=NUMBER            Tensor-parallel size: number of shards to split the model into."
            echo "  --pp=NUMBER            Pipeline-parallel size (default is 1)."
            echo "  --repo=PATH            Path to the multilinguality_megatron repo."
            echo "  --vocab_size=NUMBER    Vocab size of the model without padding."
            exit 0
            ;;
        --megatron_model=*)
            megatron_model="${arg#*=}"
            ;;
        --sharded_model=*)
            sharded_model="${arg#*=}"
            ;;
        --tp=*)
            tp="${arg#*=}"
            ;;
        --pp=*)
            pp="${arg#*=}"
            ;;
        --repo=*)
            repo="${arg#*=}"
            ;;
        --vocab_size=*)
            vocab_size="${arg#*=}"
            ;;
    esac
done
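
# Example invocation (a sketch; the script name and paths here are
# illustrative placeholders, not taken from the repo):
#   bash shard.sh --megatron_model=/path/to/megatron_model \
#       --sharded_model=/path/to/shards \
#       --tp=4 --pp=1 \
#       --repo=/path/to/multilinguality_megatron \
#       --vocab_size=37005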
# Re-shard the checkpoint: convert it to $tp tensor-parallel x $pp
# pipeline-parallel partitions in bf16. --true_vocab_size gives the unpadded
# vocab size so the converter can trim padding from the embedding table.
python "$repo"/tools/checkpoint_util.py \
    --target_tensor_parallel_size "$tp" \
    --target_pipeline_parallel_size "$pp" \
    --load_dir "$megatron_model" \
    --save_dir "$sharded_model" \
    --model_type llama \
    --true_vocab_size "$vocab_size" \
    --bf16
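
# Optional sanity check (a minimal sketch; assumes the converter writes one
# mp_rank_* sub-directory per tensor-parallel rank under the save directory):
if [ -d "$sharded_model" ]; then
    echo "Shards written to $sharded_model:"
    ls "$sharded_model"
fi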