Muennighoff committed
Commit ed18467
1 Parent(s): 2a86343
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. sbatch_4b284b12boscar.sh +163 -0
  2. sbatch_4b284b17boscar.sh +163 -0
  3. sbatch_4b284b21boscar.sh +163 -0
  4. sbatch_4b284b28boscar.sh +163 -0
  5. sbatch_4b284b42boscar.sh +163 -0
  6. sbatch_4b284b84boscar.sh +163 -0
  7. sbatch_4b284b84boscarv2.sh +163 -0
  8. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675856570.nid006263.130294.0 +3 -0
  9. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675856819.nid005294.21710.0 +3 -0
  10. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675856960.nid005294.25660.0 +3 -0
  11. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675858197.nid005550.30902.0 +3 -0
  12. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676110974.nid006477.93231.0 +3 -0
  13. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676111235.nid006473.99660.0 +3 -0
  14. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676111486.nid006075.4896.0 +3 -0
  15. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676111753.nid006299.74298.0 +3 -0
  16. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112010.nid006207.54676.0 +3 -0
  17. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112231.nid006293.58630.0 +3 -0
  18. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112569.nid006299.86070.0 +3 -0
  19. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112794.nid006299.92926.0 +3 -0
  20. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676142403.nid005382.129911.0 +3 -0
  21. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676144435.nid006070.114854.0 +3 -0
  22. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676144612.nid006070.118769.0 +3 -0
  23. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676144755.nid005382.14084.0 +3 -0
  24. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676146757.nid005695.2381.0 +3 -0
  25. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676146910.nid005695.6476.0 +3 -0
  26. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147023.nid005695.10153.0 +3 -0
  27. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147183.nid005695.14015.0 +3 -0
  28. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147337.nid005695.17432.0 +3 -0
  29. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147490.nid005695.21295.0 +3 -0
  30. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147650.nid005382.34778.0 +3 -0
  31. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152377.nid005695.69797.0 +3 -0
  32. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152542.nid006070.43023.0 +3 -0
  33. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152666.nid006070.46458.0 +3 -0
  34. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152812.nid007236.128492.0 +3 -0
  35. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152962.nid007236.2282.0 +3 -0
  36. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676153132.nid006070.59365.0 +3 -0
  37. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676153307.nid006070.62963.0 +3 -0
  38. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676195864.nid006255.7086.0 +3 -0
  39. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676197952.nid006255.20098.0 +3 -0
  40. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676199898.nid006255.36081.0 +3 -0
  41. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676201997.nid006255.49355.0 +3 -0
  42. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676203967.nid005381.105183.0 +3 -0
  43. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676222207.nid006618.79559.0 +3 -0
  44. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676222711.nid007035.120629.0 +3 -0
  45. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676223197.nid007035.126064.0 +3 -0
  46. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676223674.nid007217.102216.0 +3 -0
  47. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676224159.nid006997.52675.0 +3 -0
  48. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676224640.nid006997.58319.0 +3 -0
  49. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676225130.nid006997.66088.0 +3 -0
  50. tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676225618.nid006997.71542.0 +3 -0
sbatch_4b284b12boscar.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b12boscar
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=train12boscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_subsampled/gpt2tok_oscar_en_12B_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b17boscar.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b17boscar
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=train17boscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_subsampled/gpt2tok_oscar_en_17B_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b21boscar.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b21boscar
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=train21boscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_subsampled/gpt2tok_oscar_en_21B_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b28boscar.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b28boscar
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=train28boscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_subsampled/gpt2tok_oscar_en_28B_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b42boscar.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b42boscar
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=train42boscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_subsampled/gpt2tok_oscar_en_42B_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84boscar.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b84boscar
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=trainoscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_megatron/gpt2tok_oscar_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
sbatch_4b284b84boscarv2.sh ADDED
@@ -0,0 +1,163 @@
+ #!/bin/bash
+ #SBATCH --exclude=nid007571,nid007112,nid006774,nid007502,nid007506,nid007507,nid005145,nid006692,nid007218,nid007123,nid006124,nid006123,nid007496,nid007237,nid006852,nid007206,nid006947,nid007212,nid006977,nid007222,nid005444,nid007219,nid007493,nid007221,nid005300,nid005619,nid006118,nid005203,nid006113,nid006481,nid007077,nid005208,nid005207,nid005879,nid005901
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=40
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+ VARIANT=4b284b84boscarv2
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+   mkdir -p logs
+   sbatch "$0"
+   exit
+ fi
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+
+ TRAIN_DATA_PATH=train84boscar.txt
+ # "train: 1.0 0:1 /scratch/project_462000119/data/oscar_subsampled/gpt2tok_oscar_en_84B_text_document"
+ VALID_DATA_PATH=valc4oscar.txt
+ # "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=2
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_4516M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 84_000_000_000
+ # -> Samples: 41_015_625.0
+ TRAIN_SAMPLES=41_015_625
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.95 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 410_156 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 100 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+   "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+   "train_batch_size": $GLOBAL_BATCH_SIZE,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOF
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --train-weighted-split-paths-path $TRAIN_DATA_PATH \
+     --valid-weighted-split-paths-path $VALID_DATA_PATH \
+     --data-impl mmap \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675856570.nid006263.130294.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05ceb8811f0559480a59efb0565cf1ca8aa21cbfc1815f31ae224edfe5ebc000
+ size 40
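The TensorBoard event files in this commit are stored as Git LFS pointers rather than raw content: each three-line stub records the LFS spec version, the SHA-256 of the actual file, and its size in bytes (the many 40-byte entries are essentially empty event files, likely from restarted runs). To materialize them locally one would fetch through Git LFS, for example:

git lfs pull --include="tensorboard/tensorboard_4b284b84boscarv2/*"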
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675856819.nid005294.21710.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7455472269f8784b78484b8ae9293d185f487562338b571a288dff4e5743366f
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675856960.nid005294.25660.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ab5ec129d35d42ec2d6f60936bc7279a8bbd9094ddf01346cf2a98c2d5651b8
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1675858197.nid005550.30902.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e61f4f0e66569a26b81042246fe77154f42d95a0eac66c3e529b284ec7edd8f
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676110974.nid006477.93231.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a107f46e4619bdb593d742f2267aefb57f64ec83349ce5daf38201144fdc7a9
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676111235.nid006473.99660.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dfcf386f78fe1d83e7591514e86f7d29d7a3dd2704e5889ff585885622fa444
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676111486.nid006075.4896.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bc71bcc1b422476099dad7145b73f20090058b87a5c051c5223e939901e456c
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676111753.nid006299.74298.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e924b831347694452da891ecc0588d6a5642986fa57cd24f00d3480cca26ff49
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112010.nid006207.54676.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff77dacb9f4d895ba51215bcfeb2e4151988e9017722a57ccad27c062c1e5303
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112231.nid006293.58630.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89acfdd2b5262e544a82e689be47ba4d519744dd86ceb28f93c073bbc04d022a
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112569.nid006299.86070.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86db2238a105875b6d2bfc272e719be5813b1a630248eb3ae14b2dbf21775136
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676112794.nid006299.92926.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffd93449607b4e0f8d27efd909f5dc5a26519736dcbc085619ed271edc7b4b7b
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676142403.nid005382.129911.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4fb8ec30f0313f79878429495b81d8e058c62002bb6fb76419a8803d57a01f4
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676144435.nid006070.114854.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0faca19a93242ac9eabc67a5add9f659f4e319485712a2f762da584ffb3f1fb
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676144612.nid006070.118769.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3811ff81c07c2b00c0549c9945cc284115a27c3ac57c5b978974ac5698b63af2
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676144755.nid005382.14084.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71134aeccce957d5e1d067e6f7cb87043955dc9ebae3271c1d6bd85bfbc491a2
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676146757.nid005695.2381.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82d57b74d5ba75a2461f3c3f1bbccc8c844b28fa4dce91ce9bbe75a1bf1cc333
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676146910.nid005695.6476.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:468b9d44af9dea593a1882dc518baa569ab1299d920abbf5971b7120dad75b93
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147023.nid005695.10153.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f0aa279871ac20102491d4431b2383ae8120bbdf2b7ec4c575296a184f9721d
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147183.nid005695.14015.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01e62c12fef00290cb471e64e6789b5ae6516c22ba689ab5dbe5a9a2821bd8c4
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147337.nid005695.17432.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9959dc9828454a1c6087ac55023116a6ef775c2c8f9b738edc0ed216fc2d6de3
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147490.nid005695.21295.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe1f298cbe927960fbd21a680be79f4ce2370a65f11a02ac57e223c10737f4de
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676147650.nid005382.34778.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7ed394d94d1395e31521cb3d9f31454c22e68d7bf08bce1e1e6686df3394302
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152377.nid005695.69797.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2e85b168d5a233dcc00a97ff79f2908998d39ccf6e108891217e978ec0a908a
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152542.nid006070.43023.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c0df1b117503e3f85959aa16f0ea9e88b8c493e7ad0e9b102f908498a0addec
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152666.nid006070.46458.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7561d0d5c8067b140ee04ef936dbdc3e5c9664f812a840c3ece7e2c760637bd3
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152812.nid007236.128492.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:959eabb64aa405ac38db02da0dafa3c9ad3c449bb4a97210aaa6892494c308e8
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676152962.nid007236.2282.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4e3a7e039c2387e082c4b07a9562bfb8d78e02840289ce8105de03c8b9eccd4
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676153132.nid006070.59365.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22c6763c0bbbcead6f7f89b3ff540ca27d42b7ee9d2538f9cef7be25e9a081ca
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676153307.nid006070.62963.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37df3f475822df2f686b1b352818088b7fff7da8162f49899ee39c34b4b1d9fc
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676195864.nid006255.7086.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16a8253a0b40c44064c42bfd907dbbaa8f4a6365898ce699784e68d5978b6ee9
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676197952.nid006255.20098.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f14ba341aa76494ae98bbb88ccb0b0e7da790d8530ede042b74e864124b9b4d9
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676199898.nid006255.36081.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad8b412637f3fd4e8c6f35080b40e98efb6cd9f9d39e0d9d7691e55d79aed231
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676201997.nid006255.49355.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b7d94f82ee8abd240b1444106d38f041ddb26bb90f05c7bd87f2942e8695d7c
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676203967.nid005381.105183.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d9cea510ec16b5f21812449c5b305fcda248c1775074f7138e62b0d8021106a
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676222207.nid006618.79559.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b78350307fb4473e7d9e619a3f0b5830645616ee770f920ebbdfaa6beb07a0eb
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676222711.nid007035.120629.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38b3a4b662036efa1e3169dbfad0009fef731e31e472f489e9353af34fab6af9
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676223197.nid007035.126064.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1822963ecd55a22df184d59345491456805de452a1691f2e76b51b098f8093ce
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676223674.nid007217.102216.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2598ea1f29f08154e5915ef068fec2e0ec4050be93060c65a599adc83514b4db
+ size 19746
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676224159.nid006997.52675.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc766b8325133e3c75f3e46a91fc2d9174e92a588e67df02584111ed9a227697
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676224640.nid006997.58319.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e5d5ce6fd2b2ca1cc8ef8726eb85bcb52543f4168a184a1099a631dc03b8089
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676225130.nid006997.66088.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4d85f4f28a6cae966c6feb268a9d6b8bf1aa68b27f50cb6384c7ea9fe9cf8b7
+ size 40
tensorboard/tensorboard_4b284b84boscarv2/events.out.tfevents.1676225618.nid006997.71542.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf0fff9af17fad69e80cb7fb6ed1682ed1c6440cf13e3cd441df20bdbc1782e0
+ size 40