#!/bin/bash
# This script runs the task *outside* any specified submitter.
# Note: this script is kept for archival purposes; it is not actually run by ducttape.
# unset CUDA_VISIBLE_DEVICES
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"

data_path="1 spgi_vox_mls_text_1b/data/data_text_document"
megatron_model="spgi_vox_mls_text_1b/shards"
model_dir="spgi_vox_mls_text_1b/ckpt"
tokenizer_path="spgi_vox_mls_text_1b/new_extended_tokenizer/tokenizer.model"
tp="2"
pp="1"

# --wandb_logger \
# --wandb_id "hajmola" \
# --wandb_project "Megatron" \
# --wandb_entity "hajmola" \
# --wandb_api_key "<REDACTED>" \

# Optimization arguments. Intervals and step counts are expressed in training optimizer steps.
grad_accum_steps="12"
micro_batch_size="12"
warmup_steps="13"
eval_interval="500"
lr="3e-5" #lr="3e-5"
log_interval="10"
lr_min="3e-6" #lr_min="3e-6"
lr_scheduler="cosine"

# infra arguments 
save_interval="250"
n_gpus="2"
repo="multilinguality_megatron"
gpu_ids="4,5"
train_steps="1000"


# Parse command-line arguments
for arg in "$@"
do
    case $arg in
        --help)
        echo "Usage: ./script.sh [OPTIONS]"
        echo "Options:"
        echo "  --data_path=PATH       Path to dataset. Should have the form of <integer_0> <PATH_TO_DATA_TEXT_DOCUMENT_0> <integer_1> <PATH_TO_DATA_TEXT_DOCUMENT_1> ..., where the integers determine the data's relative weight in the training set. If every integer is equal, then the data is uniformly sampled."
        echo "  --megatron_model=PATH  Path to sharded megatron model"
        echo "  --model_dir=PATH       folder to save model checkpoints; if this has a checkpoint, it will be used to continue training"
        echo "  --tokenizer_path=PATH  Path to tokenizer.model of original HF model"
        echo "  --tp=NUMBER            Number of shards model is divided in"
        echo "  --pp=NUMBER            Pipeline parallel (default is 1)"
        echo "  --grad_accum_steps=NUMBER"
        echo "                         Number of gradient accumulation steps"
        echo "  --micro_batch_size=NUMBER"
        echo "                         Micro batch size"
        echo "  --warmup_steps=NUMBER  Number of warmup steps"
        echo "  --eval_interval=NUMBER Number of steps between validations"
        echo "  --lr=NUMBER            Learning rate"
        echo "  --log_interval=NUMBER  Number of steps between logging"
        echo "  --lr_min=NUMBER        Minimum learning rate of scheduler"
        echo "  --lr_scheduler=STRING  Learning rate scheduler"
        echo "  --save_interval=NUMBER Number of steps between saves"
        echo "  --n_gpus=NUMBER        Number of GPUs to use"
        echo "  --repo=PATH            Path to repo"
        echo "  --gpu_ids=STRING       GPU IDs to use"
        echo "  --train_steps=NUMBER   Number of training steps"
        exit 0
        ;;
        --data_path=*)
        data_path="${arg#*=}"
        shift
        ;;
        --megatron_model=*)
        megatron_model="${arg#*=}"
        shift
        ;;
        --model_dir=*)
        model_dir="${arg#*=}"
        shift
        ;;
        --tokenizer_path=*)
        tokenizer_path="${arg#*=}"
        shift
        ;;
        --tp=*)
        tp="${arg#*=}"
        shift
        ;;
        --pp=*)
        pp="${arg#*=}"
        shift
        ;;
        --grad_accum_steps=*)
        grad_accum_steps="${arg#*=}"
        shift
        ;;
        --micro_batch_size=*)
        micro_batch_size="${arg#*=}"
        shift
        ;;
        --warmup_steps=*)
        warmup_steps="${arg#*=}"
        shift
        ;;
        --eval_interval=*)
        eval_interval="${arg#*=}"
        shift
        ;;
        --lr=*)
        lr="${arg#*=}"
        shift
        ;;
        --log_interval=*)
        log_interval="${arg#*=}"
        shift
        ;;
        --lr_min=*)
        lr_min="${arg#*=}"
        shift
        ;;
        --lr_scheduler=*)
        lr_scheduler="${arg#*=}"
        shift
        ;;
        --save_interval=*)
        save_interval="${arg#*=}"
        shift
        ;;
        --n_gpus=*)
        n_gpus="${arg#*=}"
        shift
        ;;
        --repo=*)
        repo="${arg#*=}"
        shift
        ;;
        --gpu_ids=*)
        gpu_ids="${arg#*=}"
        shift
        ;;
        --train_steps=*)
        train_steps="${arg#*=}"
        shift
        ;;
    esac
done
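
# Example invocation (hypothetical file name and paths), overriding a few defaults:
#   bash train.sh --model_dir=out/ckpt --n_gpus=4 --gpu_ids=0,1,2,3 --micro_batch_size=8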

# CUDA_VISIBLE_DEVICES=$gpu_ids
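# (If GPU selection is needed, the assignment above must be uncommented and exported,
#  e.g. export CUDA_VISIBLE_DEVICES=$gpu_ids, so that torchrun inherits it.)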

if [ "$model_dir" != "" ]; then
    mkdir -p $model_dir
    mkdir -p $model_dir/runs
fi

ckpt_flag="$model_dir/latest_checkpointed_iteration.txt"
if [ -f "$ckpt_flag" ]; then
    megatron_model=$model_dir
    echo "Loading from previously saved checkpoint."
fi
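# latest_checkpointed_iteration.txt is written by Megatron each time it saves a checkpoint,
# so its presence indicates that $model_dir already contains a resumable run.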

global_batch_size=$(($micro_batch_size * $n_gpus * $grad_accum_steps))
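# With the defaults above: 12 (micro batch) * 2 (GPUs) * 12 (grad accumulation) = 288 sequences per optimizer step.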

LOG_ARGS="--log_interval $log_interval --save_interval $save_interval --eval_interval $eval_interval"
TRAIN_ARGS="--train_iters $train_steps --lr_decay_style $lr_scheduler --lr_warmup_iters $warmup_steps --lr $lr --min_lr $lr_min"
DISTRIBUTED_ARGS="--nproc_per_node $n_gpus --nnodes 1 --node_rank 0 --master_addr localhost --master_port 50000"
COMMON_ARGS="--hidden_dropout 0.0 --attention_dropout 0.0 --no_bias_gelu_fusion"
LLAMA_ARGS="--use_rms_norm --glu_activation swiglu --no_tie_embed_logits --no_new_tokens --layernorm_epsilon 1e-5"
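# Note: CUDA_DEVICE_MAX_CONNECTIONS=1 is required by Megatron when --sequence_parallel is enabled.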
CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun $DISTRIBUTED_ARGS $repo/finetune.py \
    --tensor_model_parallel_size $tp \
    --pipeline_model_parallel_size $pp \
    --load $megatron_model \
    --save $model_dir \
    --tensorboard_dir $model_dir/runs \
    --data_path $data_path \
    --model_name llama \
    --tokenizer_type SentencePieceTokenizer \
    --vocab_file=$tokenizer_path \
    --bf16 \
    --use_flash_attn \
    --micro_batch_size $micro_batch_size \
    --global_batch_size $global_batch_size \
    --sequence_parallel \
    --recompute_granularity selective \
    --use_checkpoint_args \
    --seq_length 2048 \
    --split 99,1,1 \
    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA_ARGS