Muennighoff committed
Commit 80b7065
1 parent: 5c8710b

Create tr13-176B-ml-p31lossseq

Files changed (1)
  1. tr13-176B-ml-p31lossseq +209 -0
tr13-176B-ml-p31lossseq ADDED
#!/bin/bash
#SBATCH --job-name=taskytr13f-6B3-ml-t0
#SBATCH --partition=gpu_p5
#SBATCH --constraint=a100
#SBATCH --qos=qos_gpu-gc             # up to 100h
#SBATCH --nodes=8
#SBATCH --ntasks-per-node=1          # crucial - only 1 task per node!
#SBATCH --cpus-per-task=64           # number of cores per task
#SBATCH --hint=nomultithread         # we get physical cores, not logical
#SBATCH --gres=gpu:8                 # number of GPUs
#SBATCH --time 12:00:00              # maximum execution time (HH:MM:SS)
#SBATCH --output=%x-%j.out           # output file name
#SBATCH --account=ajs@a100

set -x -e

source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
echo "START TIME: $(date)"

variant=tasky

DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0
CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
REPO_PATH=$DATA_OUTPUT_PATH/tr13f-6B3-ml-t0-logs
TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
LOGS_PATH=$REPO_PATH/logs/$variant
mkdir -p $LOGS_PATH
mkdir -p $TENSORBOARD_PATH

MEGATRON_DEEPSPEED_REPO=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/megdslossseq/Megatron-DeepSpeed
cd $MEGATRON_DEEPSPEED_REPO

KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr13f-6B3-mtf

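# The kill-switch file lets a run be stopped gracefully: the trainer watches
# for this path (via --kill-switch-path below) and exits once the file exists.
# A sketch of the intended usage, from any login node:
# touch $KILL_SWITCH_PATH
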
TRAIN_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/tasky_train.txt
VALID_DATA_PATH=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed/data/p31_validation.txt
TOKENIZER_NAME_OR_PATH=bigscience/tokenizer

# defining the right environment variables
export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1

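# Offline mode means all assets must already be in the caches above. A sketch
# of pre-populating the tokenizer cache from a node with network access
# (assumes transformers is installed in the environment):
# python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('bigscience/tokenizer')"
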
# testing for potential faulty nodes
# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'

# so processes know who to talk to
MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
MASTER_PORT=6001

GPUS_PER_NODE=8
NNODES=$SLURM_NNODES

PP_SIZE=1
TP_SIZE=1

# T0 paper:
# ...truncate input and target sequences to 1024 and 256 tokens...
# ...use a batch size of 1024 sequences ... 2^20 total input tokens per batch...
# We use sequences of 2048 total tokens; note that with GLOBAL_BATCH_SIZE=2048
# below this is 2**22 tokens per batch (a batch of 512 would match the 2**20).
MICRO_BATCH_SIZE=4
GLOBAL_BATCH_SIZE=2048

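# Sanity check on how the global batch splits (DeepSpeed derives this itself):
# data-parallel size = (8 nodes * 8 GPUs) / (TP_SIZE * PP_SIZE) = 64 replicas,
# gradient accumulation = GLOBAL_BATCH_SIZE / (MICRO_BATCH_SIZE * 64) = 2048 / 256 = 8
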
NLAYERS=30
NHIDDEN=4096
NHEADS=32
SEQ_LEN=2048

SAVE_INTERVAL=250

TRAIN_SAMPLES=6_348_800

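# At GLOBAL_BATCH_SIZE=2048, TRAIN_SAMPLES=6_348_800 works out to exactly
# 6348800 / 2048 = 3100 optimizer steps in total.
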
# T0 paper:
# "...we use a learning rate of 1e-3..."
# However, they use Adafactor, which adapts the LR
# For Adam we likely want a lower one
# FLAN:
# "...decay of 1e-4..."

# Uncomment for the first step
# --no-load-optim \
OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-5 \
    --lr-decay-style constant \
    --lr-warmup-samples 0 \
    --clip-grad 1.0 \
    --weight-decay 1e-4 \
    --no-load-optim \
    "
# for 20h 1190, for 100h 5990
# --exit-duration-in-mins 1190 \
EXIT_OPTS=" \
    --exit-duration-in-mins 5990 \
    "

GPT_ARGS=" \
    --pp-partition-method 'type:transformer|embedding' \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --tokenizer-type PretrainedFromHF \
    --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
    --init-method-std 0.0048 \
    --embed-layernorm \
    --fp16 \
    --seed 42 \
    --position-embedding-type alibi \
    --checkpoint-activations \
    --abort-on-unmet-fused-kernel-constraints \
    --kill-switch-path $KILL_SWITCH_PATH \
    --pad-vocab-size-to 250880 \
    $OPTIMIZER_ARGS \
    $EXIT_OPTS \
    "

OUTPUT_ARGS=" \
    --log-interval 1 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 250 \
    --eval-iters 50 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "
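
# At GBS=2048, evaluating/saving every 250 steps means every 512,000 training
# samples; each eval pass should cover roughly eval-iters * GBS = 50 * 2048
# = 102,400 validation samples.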

ZERO_STAGE=1

config_json="./ds_config.$SLURM_JOBID.json"

# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
cat <<EOT > $config_json
{
  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
  "train_batch_size": $GLOBAL_BATCH_SIZE,
  "gradient_clipping": 1.0,
  "zero_optimization": {
    "stage": $ZERO_STAGE
  },
  "fp16": {
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 500,
    "hysteresis": 2,
    "min_loss_scale": 1,
    "initial_scale_power": 12
  },
  "steps_per_print": 2000,
  "wall_clock_breakdown": false
}
EOT


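# "loss_scale": 0 selects dynamic loss scaling, starting from
# 2**initial_scale_power = 2**12 = 4096. Optional sanity check that the
# generated config is valid JSON (a sketch; fails loudly on a typo):
# python -m json.tool "$config_json" > /dev/null
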
DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config ${config_json} \
    --zero-stage ${ZERO_STAGE} \
    --deepspeed-activation-checkpointing \
    "

export LAUNCHER="python -u -m torch.distributed.run \
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    "

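# torch.distributed.run derives WORLD_SIZE = NNODES * GPUS_PER_NODE = 8 * 8 = 64
# ranks; --tee 3 mirrors each worker's stdout and stderr into the main log.
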
export CMD=" \
    `pwd`/finetune_t0.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --dataloader-type single \
    --data-impl mmap \
    --distributed-backend nccl \
    $DEEPSPEED_ARGS \
    "

echo $CMD

# do not remove this workaround, or the training will hang and nodes will be lost
export CUDA_LAUNCH_BLOCKING=1

# hide duplicated errors using this hack - will be properly fixed in pt-1.12
export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json

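# srun starts one launcher per node; \$SLURM_PROCID (0..NNODES-1) is escaped so
# it expands on each node, giving every launcher its correct --node_rank.
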
clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt

echo "END TIME: $(date)"