ferdinand.mom
committed on
Commit fbac927
1 Parent(s): 0722166
remove illegal combo bench for 64_GPUS
This view is limited to 50 files because it contains too many changes. See raw diff.
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/config.yaml +0 -90
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out +0 -0
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt +0 -1
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/bench.slurm +0 -111
- llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/config.yaml +0 -90
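For context, each removed folder corresponds to one (dp, tp, pp, micro_batch_size) combination benchmarked on 64 GPUs. The exact rule bench_cluster uses to flag a combination as "illegal" is not shown in this diff; the lines below are only a hypothetical sketch of the kind of sanity check involved, assuming 64 GPUs and the fixed global batch size of 1024 sequences used by the deleted configs.

# Hypothetical combo check (illustration only, not part of bench_cluster)
dp=1; tp=16; pp=4; mbz=1      # one of the removed combos
n_gpus=64; global_bs=1024     # assumptions taken from the deleted configs
if [ $((dp * tp * pp)) -ne $n_gpus ]; then
    echo "illegal combo: dp*tp*pp does not match $n_gpus GPUs"
elif [ $((global_bs % (dp * mbz))) -ne 0 ]; then
    echo "illegal combo: global batch size not divisible by dp*mbz"
else
    echo "gradient accumulation steps per replica: $((global_bs / (dp * mbz)))"
fi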
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 4
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 1024
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 1
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
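Note (not part of the diff itself): the deleted configs vary only micro_batch_size and batch_accumulation_per_replica, and their product with dp stays at 1024 sequences of 4096 tokens per step. A quick check of the pairs that appear in this commit, as an illustrative sketch:

dp=1
for pair in "1 1024" "2 512" "16 64" "32 32" "128 8" "256 4"; do
    set -- $pair   # $1 = micro_batch_size, $2 = batch_accumulation_per_replica
    echo "mbz=$1 accum=$2 -> $((dp * $1 * $2)) sequences per step"
done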
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-1/status.txt
DELETED
@@ -1 +0,0 @@
-timeout
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 4
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 8
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 128
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-128/status.txt
DELETED
@@ -1 +0,0 @@
-oom
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 4
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 64
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 16
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-16/status.txt
DELETED
@@ -1 +0,0 @@
-timeout
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 4
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 512
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 2
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-2/status.txt
DELETED
@@ -1 +0,0 @@
-timeout
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 4
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 4
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 256
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-256/status.txt
DELETED
@@ -1 +0,0 @@
-oom
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 4
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 32
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 32
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-32/status.txt
DELETED
@@ -1 +0,0 @@
timeout
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/bench.slurm
DELETED
@@ -1,111 +0,0 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=8
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doenst update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 8 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/config.yaml
DELETED
@@ -1,90 +0,0 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 4
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 256
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-4/status.txt
DELETED
@@ -1 +0,0 @@
timeout
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/bench.slurm
DELETED
@@ -1,111 +0,0 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=8
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doenst update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 8 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/config.yaml
DELETED
@@ -1,90 +0,0 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 4
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 16
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 64
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-64/status.txt
DELETED
@@ -1 +0,0 @@
oom
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/bench.slurm
DELETED
@@ -1,111 +0,0 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=8
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doenst update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 8 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8 llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/config.yaml
DELETED
@@ -1,90 +0,0 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 4
  pp_engine: 1f1b
  tp: 16
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 128
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 8
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-16_pp-4_mbz-8/status.txt
DELETED
@@ -1 +0,0 @@
timeout
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/bench.slurm
DELETED
@@ -1,111 +0,0 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=8
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doenst update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 8 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1 llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/config.yaml
DELETED
@@ -1,90 +0,0 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 2
  pp_engine: 1f1b
  tp: 32
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 1024
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 1
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1/status.txt
DELETED
@@ -1 +0,0 @@
timeout
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/bench.slurm
DELETED
@@ -1,111 +0,0 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=8
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doenst update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 8 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024 llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/config.yaml
DELETED
@@ -1,90 +0,0 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 2
  pp_engine: 1f1b
  tp: 32
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 1024
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-1024/status.txt
DELETED
@@ -1 +0,0 @@
oom
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/bench.slurm
DELETED
@@ -1,111 +0,0 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=01:30:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=8
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doenst update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 8 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128 llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 2
-  pp_engine: 1f1b
-  tp: 32
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 8
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 128
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/log.out
DELETED
The diff for this file is too large to render. See raw diff
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-128/status.txt
DELETED
@@ -1 +0,0 @@
-oom
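The deleted status.txt above records "oom" for dp-1_tp-32_pp-2_mbz-128. As a minimal sketch (not part of the removed files; variable names are illustrative), the combination arithmetic implied by the deleted config.yaml can be checked as follows, assuming the sweep targets 64 GPUs and keeps the global batch fixed:

    # Hypothetical sanity check; values are taken from the config.yaml deleted above.
    dp=1; tp=32; pp=2; mbz=128; grad_accum=8; seq_len=4096
    if [ $((dp * tp * pp)) -ne 64 ]; then
        echo "illegal combo: dp * tp * pp must equal 64"
    fi
    echo "global batch: $((dp * mbz * grad_accum)) sequences, $((dp * mbz * grad_accum * seq_len)) tokens per step"

Here dp * tp * pp = 64, so the topology itself is schedulable, and dp * mbz * grad_accum = 1024 sequences of 4096 tokens per step, the same global batch as the mbz-16 entry below (16 * 64 = 1024); the "oom" status presumably reflects the 128-sequence micro batch rather than the parallelism shape.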
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=01:30:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=8
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doenst update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 8 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16 llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16 --commit-message "Upload llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
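For reference, a sketch of how one of these deleted entries would have been launched and checked (standard Slurm and coreutils commands; the paths are the ones used throughout this sweep, and the status values are the ones the script above writes):

    # Hypothetical usage: submit the per-combination script, then poll its status file,
    # which ends up as one of running / completed / oom / timeout / fail.
    sbatch /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/bench.slurm
    cat /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/status.txt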
llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 2
-  pp_engine: 1f1b
-  tp: 32
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/64_GPUS/dp-1_tp-32_pp-2_mbz-16
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 0
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 64
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 16
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null