Muennighoff commited on
Commit
64775bb
1 Parent(s): b757584
sbatch_2b855b55bc4ul2ND.sh ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Train a 2.98B-parameter UL2 (ND objective) model on 55B C4 tokens with
# Megatron-DeepSpeed on 32 nodes x 8 MI250 GPUs (LUMI standard-g partition).
# Self-submitting: run directly and it resubmits itself via sbatch.
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Run identifier; namespaces the kill switch, checkpoints and tensorboard dirs.
VARIANT=2b855b55bc4ul2ndfix

# If run without sbatch, submit ourselves and exit.
# "${SLURM_JOB_ID:-}" is quoted and defaulted so the test is safe when the
# variable is unset (the original unquoted [ -z $SLURM_JOB_ID ] only worked
# by accident of word-splitting).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the training loop to checkpoint and exit cleanly.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

# Weighted-split path files; each contains one line such as
# "train: 1.0 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_55B_text_document"
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
TRAIN_DATA_PATH=train55b.txt
VALID_DATA_PATH=val.txt

# Pure data parallelism: no pipeline or tensor model parallelism.
PP_SIZE=1
TP_SIZE=1

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters: PARAM_2980M is an array defined in model_params.sh as
# (hidden_size ffn_hidden_size kv_size n_heads n_layers).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=1000

# Tokens: 55000000000 -> Samples: 26855469 (tokens / SEQ_LEN).
# Megatron's integer argument parser accepts underscore digit separators.
TRAIN_SAMPLES=26_855_469

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 268_555 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 100 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config; batch sizes must match the CLI args above.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_ul2.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-impl mmap \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    $DEEPSPEED_ARGS \
    --ul2-model-type ND \
    --tokenizer-type GPT2BPETokenizer \
    --vocab-extra-ids 3000 \
    "
# Alternative data plumbing (kept for reference):
#     --data-path $TRAIN_DATA_PATH \
#     --split 949,50,1 \

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# $CMD must word-split into separate srun/launch.sh arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_2b855b55bc4ul2NDNEW.sh ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Train a 2.98B-parameter UL2 (ND objective) model on 55B C4 tokens with
# Megatron-DeepSpeed on 32 nodes x 8 MI250 GPUs (LUMI standard-g partition).
# "NEW" variant: micro-batch 1 with 2 gradient-accumulation steps.
# Self-submitting: run directly and it resubmits itself via sbatch.
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Run identifier; namespaces the kill switch, checkpoints and tensorboard dirs.
VARIANT=2b855b55bc4ul2ndfixnew

# If run without sbatch, submit ourselves and exit.
# "${SLURM_JOB_ID:-}" is quoted and defaulted so the test is safe when the
# variable is unset (the original unquoted [ -z $SLURM_JOB_ID ] only worked
# by accident of word-splitting).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the training loop to checkpoint and exit cleanly.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

# Weighted-split path files; each contains one line such as
# "train: 1.0 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_55B_text_document"
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
TRAIN_DATA_PATH=train55b.txt
VALID_DATA_PATH=val.txt

# Pure data parallelism: no pipeline or tensor model parallelism.
PP_SIZE=1
TP_SIZE=1

MICRO_BATCH_SIZE=1
GRADIENT_ACCUMULATION_STEPS=2
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters: PARAM_2980M is an array defined in model_params.sh as
# (hidden_size ffn_hidden_size kv_size n_heads n_layers).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=1000

# Tokens: 55000000000 -> Samples: 26855469 (tokens / SEQ_LEN).
# Megatron's integer argument parser accepts underscore digit separators.
TRAIN_SAMPLES=26_855_469

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 268_555 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 100 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config; batch sizes must match the CLI args above.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_ul2.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-impl mmap \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    $DEEPSPEED_ARGS \
    --ul2-model-type ND \
    --tokenizer-type GPT2BPETokenizer \
    --vocab-extra-ids 3000 \
    "
# Alternative data plumbing (kept for reference):
#     --data-path $TRAIN_DATA_PATH \
#     --split 949,50,1 \

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# $CMD must word-split into separate srun/launch.sh arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_2b855b55bc4ul2new.sh ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Train a 2.98B-parameter UL2 (CD objective) model on 55B C4 tokens with
# Megatron-DeepSpeed on 32 nodes x 8 MI250 GPUs (LUMI standard-g partition).
# Self-submitting: run directly and it resubmits itself via sbatch.
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=32
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Run identifier; namespaces the kill switch, checkpoints and tensorboard dirs.
VARIANT=2b855b55bc4ul2new

# If run without sbatch, submit ourselves and exit.
# "${SLURM_JOB_ID:-}" is quoted and defaulted so the test is safe when the
# variable is unset (the original unquoted [ -z $SLURM_JOB_ID ] only worked
# by accident of word-splitting).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the training loop to checkpoint and exit cleanly.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

# Weighted-split path files; each contains one line such as
# "train: 1.0 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_55B_text_document"
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
TRAIN_DATA_PATH=train55b.txt
VALID_DATA_PATH=val.txt

# Pure data parallelism: no pipeline or tensor model parallelism.
PP_SIZE=1
TP_SIZE=1

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters: PARAM_2980M is an array defined in model_params.sh as
# (hidden_size ffn_hidden_size kv_size n_heads n_layers).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=1000

# Tokens: 55000000000 -> Samples: 26855469 (tokens / SEQ_LEN).
# Megatron's integer argument parser accepts underscore digit separators.
TRAIN_SAMPLES=26_855_469

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 268_555 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 100 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config; batch sizes must match the CLI args above.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_ul2.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-impl mmap \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    $DEEPSPEED_ARGS \
    --ul2-model-type CD \
    --tokenizer-type GPT2BPETokenizer \
    --vocab-extra-ids 3000 \
    "
# Alternative data plumbing (kept for reference):
#     --data-path $TRAIN_DATA_PATH \
#     --split 949,50,1 \

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# $CMD must word-split into separate srun/launch.sh arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_2b855b55bc4ul2newval.sh ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Train a 2.98B-parameter UL2 (CD objective) model on 55B C4 tokens with
# Megatron-DeepSpeed on 32 nodes x 8 MI250 GPUs (LUMI standard-g partition).
# "newval" variant of the ul2new run.
# Self-submitting: run directly and it resubmits itself via sbatch.
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Run identifier; namespaces the kill switch, checkpoints and tensorboard dirs.
VARIANT=2b855b55bc4ul2newval

# If run without sbatch, submit ourselves and exit.
# "${SLURM_JOB_ID:-}" is quoted and defaulted so the test is safe when the
# variable is unset (the original unquoted [ -z $SLURM_JOB_ID ] only worked
# by accident of word-splitting).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the training loop to checkpoint and exit cleanly.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

# Weighted-split path files; each contains one line such as
# "train: 1.0 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_55B_text_document"
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
TRAIN_DATA_PATH=train55b.txt
VALID_DATA_PATH=val.txt

# Pure data parallelism: no pipeline or tensor model parallelism.
PP_SIZE=1
TP_SIZE=1

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters: PARAM_2980M is an array defined in model_params.sh as
# (hidden_size ffn_hidden_size kv_size n_heads n_layers).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=1000

# Tokens: 55000000000 -> Samples: 26855469 (tokens / SEQ_LEN).
# Megatron's integer argument parser accepts underscore digit separators.
TRAIN_SAMPLES=26_855_469

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 268_555 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 100 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config; batch sizes must match the CLI args above.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_ul2.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-impl mmap \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    $DEEPSPEED_ARGS \
    --ul2-model-type CD \
    --tokenizer-type GPT2BPETokenizer \
    --vocab-extra-ids 3000 \
    "
# Alternative data plumbing (kept for reference):
#     --data-path $TRAIN_DATA_PATH \
#     --split 949,50,1 \

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# $CMD must word-split into separate srun/launch.sh arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_2b855b55bc4ul2val.sh ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Train a 2.98B-parameter UL2 (CD objective) model on 55B C4 tokens with
# Megatron-DeepSpeed on 32 nodes x 8 MI250 GPUs (LUMI standard-g partition).
# "val" variant of the ul2 run.
# Self-submitting: run directly and it resubmits itself via sbatch.
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=32
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Run identifier; namespaces the kill switch, checkpoints and tensorboard dirs.
VARIANT=2b855b55bc4ul2val

# If run without sbatch, submit ourselves and exit.
# "${SLURM_JOB_ID:-}" is quoted and defaulted so the test is safe when the
# variable is unset (the original unquoted [ -z $SLURM_JOB_ID ] only worked
# by accident of word-splitting).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the training loop to checkpoint and exit cleanly.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

# Weighted-split path files; each contains one line such as
# "train: 1.0 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_55B_text_document"
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
TRAIN_DATA_PATH=train55b.txt
VALID_DATA_PATH=val.txt

# Pure data parallelism: no pipeline or tensor model parallelism.
PP_SIZE=1
TP_SIZE=1

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters: PARAM_2980M is an array defined in model_params.sh as
# (hidden_size ffn_hidden_size kv_size n_heads n_layers).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=1000

# Tokens: 55000000000 -> Samples: 26855469 (tokens / SEQ_LEN).
# Megatron's integer argument parser accepts underscore digit separators.
TRAIN_SAMPLES=26_855_469

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 268_555 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 100 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config; batch sizes must match the CLI args above.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_ul2.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-impl mmap \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    $DEEPSPEED_ARGS \
    --ul2-model-type CD \
    --tokenizer-type GPT2BPETokenizer \
    --vocab-extra-ids 3000 \
    "
# Alternative data plumbing (kept for reference):
#     --data-path $TRAIN_DATA_PATH \
#     --split 949,50,1 \

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# $CMD must word-split into separate srun/launch.sh arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_2b855b55bc4ul2valfast.sh ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Train a 2.98B-parameter UL2 (CD objective) model on 55B C4 tokens with
# Megatron-DeepSpeed on 32 nodes x 8 MI250 GPUs (LUMI standard-g partition).
# "valfast" variant of the ul2 run.
# Self-submitting: run directly and it resubmits itself via sbatch.
#SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=32
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Run identifier; namespaces the kill switch, checkpoints and tensorboard dirs.
VARIANT=2b855b55bc4ul2valfast

# If run without sbatch, submit ourselves and exit.
# "${SLURM_JOB_ID:-}" is quoted and defaulted so the test is safe when the
# variable is unset (the original unquoted [ -z $SLURM_JOB_ID ] only worked
# by accident of word-splitting).
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the training loop to checkpoint and exit cleanly.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"

# Weighted-split path files; each contains one line such as
# "train: 1.0 0:1 /scratch/project_462000119/data/c4_subsampled/gpt2tok_c4_en_55B_text_document"
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
TRAIN_DATA_PATH=train55b.txt
VALID_DATA_PATH=val.txt

# Pure data parallelism: no pipeline or tensor model parallelism.
PP_SIZE=1
TP_SIZE=1

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters: PARAM_2980M is an array defined in model_params.sh as
# (hidden_size ffn_hidden_size kv_size n_heads n_layers).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=1000

# Tokens: 55000000000 -> Samples: 26855469 (tokens / SEQ_LEN).
# Megatron's integer argument parser accepts underscore digit separators.
TRAIN_SAMPLES=26_855_469

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 268_555 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 100 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config; batch sizes must match the CLI args above.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_ul2.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-impl mmap \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    $DEEPSPEED_ARGS \
    --ul2-model-type CD \
    --tokenizer-type GPT2BPETokenizer \
    --vocab-extra-ids 3000 \
    "
# Alternative data plumbing (kept for reference):
#     --data-path $TRAIN_DATA_PATH \
#     --split 949,50,1 \

echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# $CMD must word-split into separate srun/launch.sh arguments.
# shellcheck disable=SC2086
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"