glm / generate_block.sh
#!/bin/bash
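# Usage: bash generate_block.sh <model_config.sh>
# The argument is a shell config that this script sources; the example
# path below is hypothetical:
#   bash generate_block.sh config/model_blocklm_large.sh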
# CHECKPOINT_PATH is typically referenced by MODEL_ARGS in the sourced config.
# The second assignment overrides the first, so the stale default is commented out.
# CHECKPOINT_PATH=/root/data/checkpoints
CHECKPOINT_PATH=../../../model_save/checkpoints/checkpoints
# Load the model configuration (expected to define MODEL_ARGS) from the script passed as $1.
source "$1"
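# A sourced config is expected to look roughly like the following
# (flag values are illustrative, not taken from an actual config):
#   MODEL_ARGS="--block-lm --num-layers 24 --hidden-size 1024 \
#               --load ${CHECKPOINT_PATH}/blocklm-large"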
MPSIZE=1
MAXSEQLEN=512
# Pick a random port for torch.distributed (only used by the commented-out launcher below).
MASTER_PORT=$(shuf -n 1 -i 10000-65535)
# Sampling args.
TEMP=0.9
# If TOPK/TOPP are 0, generation falls back to greedy decoding;
# a nonzero top-k also overrides top-p.
TOPK=40
TOPP=0
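# For pure nucleus (top-p) sampling instead, one might set (values illustrative):
#   TOPK=0
#   TOPP=0.95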
script_path=$(realpath "$0")
script_dir=$(dirname "$script_path")
# DeepSpeed config path; not consumed by the plain python3 launch below.
config_json="$script_dir/ds_config.json"
# Multi-GPU alternative (uses MASTER_PORT above):
#python -m torch.distributed.launch --nproc_per_node=$MPSIZE --master_port $MASTER_PORT generate_samples.py \
python3 generate_samples.py \
       --DDP-impl none \
       --model-parallel-size $MPSIZE \
       $MODEL_ARGS \
       --fp16 \
       --cache-dir cache \
       --out-seq-length $MAXSEQLEN \
       --seq-length 512 \
       --temperature $TEMP \
       --top_k $TOPK \
       --top_p $TOPP