Muennighoff committed on
Commit
7b58bc5
1 Parent(s): 9891f02

Create sbatch_8b7_178b_25b_jz_tmp.sh

Browse files
Files changed (1) hide show
  1. sbatch_8b7_178b_25b_jz_tmp.sh +170 -0
sbatch_8b7_178b_25b_jz_tmp.sh ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ #SBATCH --exclude=nid007052,nid005560
3
+ #SBATCH --nodes=64
4
+ #SBATCH --ntasks-per-node=1
5
+ #SBATCH --cpus-per-task=40
6
+ #SBATCH --mem=256G
7
+ #SBATCH -p standard-g
8
+ #SBATCH -t 48:00:00
9
+ #SBATCH --gpus-per-node=mi250:8
10
+ #SBATCH --exclusive=user
11
+ #SBATCH --hint=nomultithread
12
+ #SBATCH --account=project_462000119
13
+ #SBATCH -o logs/%j.out
14
+ #SBATCH -e logs/%j.err
15
+
# Run identifier; every run-local artifact path below is derived from it.
VARIANT="8b7178b"

# Touching the kill-switch file asks the trainer to stop gracefully
# (passed to --kill-switch-path further down).
KILL_SWITCH_PATH="kill-switch-${VARIANT}"
CHECKPOINT_PATH="checkpoints_${VARIANT}"
TENSORBOARD_PATH="tensorboard_${VARIANT}"


24
+ ### JEANZAY / NVIDIA ###
25
+ set -x -e
26
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
27
+
28
+
29
+
### LUMI / AMD ###
# Self-submit: when invoked directly (no Slurm job context), create the log
# directory required by the #SBATCH -o/-e paths, resubmit this script via
# sbatch, and exit.  Inside the allocated job SLURM_JOB_ID is set, so
# execution falls through to the training setup below.
# Fix: the original `[ -z $SLURM_JOB_ID ]` left the variable unquoted; with
# the variable unset it collapsed to the one-argument test `[ -z ]`, which is
# true only by accident.  `"${SLURM_JOB_ID:-}"` makes the emptiness test
# explicit and safe.
if [ -z "${SLURM_JOB_ID:-}" ]; then
  mkdir -p logs
  sbatch "$0"
  exit
fi

set -euo pipefail

# symlink logs/latest.out and logs/latest.err to this job's log files
# (quote $SLURM_JOB_ID: unquoted expansions word-split and glob — SC2086)
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Data
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"
# The original assigned DATA_PATH three times in a row; only the last value
# ever took effect.  The dead alternatives are kept as comments so the active
# dataset (OSCAR) is unambiguous.
#DATA_PATH="/scratch/project_462000119/data/pile/megatron_data/meg-gpt2_pile_text_document"
#DATA_PATH="/scratch/project_462000119/data"
DATA_PATH="/scratch/project_462000119/data/oscar_megatron/gpt2tok_oscar_text_document"

# 3D-parallelism layout: two pipeline stages, no tensor parallelism.
PP_SIZE=2
TP_SIZE=1

# Per-GPU micro-batch and gradient accumulation.  The world size covers the
# whole allocation (GPUs per node times node count, as reported by Slurm),
# and the global batch is the product of all three factors.
MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$(( SLURM_GPUS_ON_NODE * SLURM_JOB_NUM_NODES ))
GLOBAL_BATCH_SIZE=$(( WORLD_SIZE * MICRO_BATCH_SIZE * GRADIENT_ACCUMULATION_STEPS ))

# Model shape: pull the 9293M-parameter preset out of model_params.sh.
# As consumed here, each PARAM_* array is ordered:
# d_model, ffw_size, kv_size, n_heads, n_layers.
source model_params.sh
params=("${PARAM_9293M[@]}")
NHIDDEN="${params[0]}"
FFN_HIDDEN_SIZE="${params[1]}"
KV_SIZE="${params[2]}"
NHEADS="${params[3]}"
NLAYERS="${params[4]}"
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

71
+ SAVE_INTERVAL=10000
72
+
73
+ # Tokens: 178000000000
74
+ # -> Samples: 86914062
75
+ TRAIN_SAMPLES=86_914_062
76
+
# Optimizer flags: Adam with cosine LR decay over the full sample budget and
# a warmup of 869_140 samples (~1% of training).
OPTIMIZER_ARGS="--optimizer adam"
OPTIMIZER_ARGS+=" --adam-beta1 0.9"
OPTIMIZER_ARGS+=" --adam-beta2 0.999"
OPTIMIZER_ARGS+=" --adam-eps 1e-8"
OPTIMIZER_ARGS+=" --lr 2e-4"
OPTIMIZER_ARGS+=" --min-lr 2e-5"
OPTIMIZER_ARGS+=" --lr-decay-style cosine"
OPTIMIZER_ARGS+=" --lr-decay-samples $TRAIN_SAMPLES"
OPTIMIZER_ARGS+=" --lr-warmup-samples 869_140"
OPTIMIZER_ARGS+=" --clip-grad 1.0"
OPTIMIZER_ARGS+=" --weight-decay 1e-1"
90
+
# Core Megatron flags: architecture, sequence/batch sizes, tokenizer files,
# kill switch, bf16, plus the optimizer flags assembled above.
# NOTE(review): --clip-grad 1.0 also appears in OPTIMIZER_ARGS with the same
# value — presumably a harmless duplicate (last occurrence wins); confirm.
GPT_ARGS="--num-layers $NLAYERS"
GPT_ARGS+=" --hidden-size $NHIDDEN"
GPT_ARGS+=" --num-attention-heads $NHEADS"
GPT_ARGS+=" --kv-channels $KV_SIZE"
GPT_ARGS+=" --ffn-hidden-size $FFN_HIDDEN_SIZE"
GPT_ARGS+=" --seq-length $SEQ_LEN"
GPT_ARGS+=" --max-position-embeddings $SEQ_LEN"
GPT_ARGS+=" --micro-batch-size $MICRO_BATCH_SIZE"
GPT_ARGS+=" --global-batch-size $GLOBAL_BATCH_SIZE"
GPT_ARGS+=" --train-samples $TRAIN_SAMPLES"
GPT_ARGS+=" --vocab-file $VOCAB_FILE"
GPT_ARGS+=" --merge-file $MERGE_FILE"
GPT_ARGS+=" --clip-grad 1.0"
GPT_ARGS+=" --kill-switch-path $KILL_SWITCH_PATH"
GPT_ARGS+=" --bf16"
GPT_ARGS+=" $OPTIMIZER_ARGS"
109
+
# Logging/eval cadence and TensorBoard output.  Eval is deliberately cheap
# (a single iteration every 1000 steps).
OUTPUT_ARGS="--log-interval 10"
OUTPUT_ARGS+=" --save-interval $SAVE_INTERVAL"
OUTPUT_ARGS+=" --eval-interval 1000"
OUTPUT_ARGS+=" --eval-iters 1"
OUTPUT_ARGS+=" --tensorboard-dir $TENSORBOARD_PATH"
OUTPUT_ARGS+=" --tensorboard-queue-size 5"
OUTPUT_ARGS+=" --log-timers-to-tensorboard"
OUTPUT_ARGS+=" --log-batch-size-to-tensorboard"
OUTPUT_ARGS+=" --log-validation-ppl-to-tensorboard"
121
+
# DeepSpeed runtime config: ZeRO disabled (stage 0), bf16 enabled; batch
# sizes mirror the shell variables above.  The file is written per job ID so
# concurrent runs do not clobber each other's config.
ZERO_STAGE=0

mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat > "$DS_CONFIG_PATH" <<EOF
{
"train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
"train_batch_size": $GLOBAL_BATCH_SIZE,
"gradient_clipping": 1.0,
"zero_optimization": {
"stage": $ZERO_STAGE
},
"bf16": {
"enabled": true
},
"steps_per_print": 2000,
"wall_clock_breakdown": false
}
EOF
142
+
# Hook DeepSpeed into Megatron's argument parser, pointing at the per-job
# JSON config generated above.
DEEPSPEED_ARGS="--deepspeed"
DEEPSPEED_ARGS+=" --deepspeed_config $DS_CONFIG_PATH"
DEEPSPEED_ARGS+=" --zero-stage $ZERO_STAGE"
148
+
# Final training command.  The first token is the entry-point script handed
# to the per-node launcher; everything after it is argument text built from
# the GPT_ARGS / OUTPUT_ARGS / DEEPSPEED_ARGS strings above.
CMD="Megatron-DeepSpeed/pretrain_gpt.py"
CMD+=" --tensor-model-parallel-size $TP_SIZE"
CMD+=" --pipeline-model-parallel-size $PP_SIZE"
CMD+=" $GPT_ARGS"
CMD+=" $OUTPUT_ARGS"
CMD+=" --save $CHECKPOINT_PATH"
CMD+=" --load $CHECKPOINT_PATH"
CMD+=" --data-path $DATA_PATH"
CMD+=" --data-impl mmap"
CMD+=" --split 949,50,1"
CMD+=" $DEEPSPEED_ARGS"
162
+
# Log the assembled command.  Quoted (fix for SC2086): the unquoted original
# worked but word-split and glob-expanded the string before printing.
echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# launch.sh is the per-node wrapper started by srun on every node.  $CMD is
# intentionally left unquoted here so it word-splits into the program path
# and its arguments.
# bash launch_srun.sh $CMD
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"