ferdinand.mom committed
Commit 0722166
Parent(s): b79f60a
remove illegal combo bench
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/bench.slurm +0 -111
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/config.yaml +0 -90
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out +0 -0
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt +0 -1
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/bench.slurm +0 -111
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/config.yaml +0 -90
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/log.out +0 -0
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/status.txt +0 -1
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/bench.slurm +0 -111
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/config.yaml +0 -90
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/log.out +0 -0
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/status.txt +0 -1
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/bench.slurm +0 -111
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/config.yaml +0 -90
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/log.out +0 -550
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/status.txt +0 -1
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/bench.slurm +0 -111
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/config.yaml +0 -90
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out +0 -0
- llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt +0 -1
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/bench.slurm
DELETED
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#SBATCH --job-name=bench_cluster
-#SBATCH --time=00:59:00
-#SBATCH --partition=hopper-prod
-#SBATCH --nodes=2
-#SBATCH --gres=gpu:8
-#SBATCH --qos=high
-#SBATCH --ntasks-per-node=1
-#SBATCH --cpus-per-task=96
-#SBATCH --exclusive
-#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out
-#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out
-
-# Function to update status based on squeue output
-update_status() {
-    job_id=$1
-    status_file=$2
-    # For unknown reasons, it doesn't update status for pending. It only works for running
-    while true; do
-        job_status=$(squeue --job $job_id --noheader --format=%T)
-        echo "Job status: $job_status"
-        if [ -z "$job_status" ]; then
-            # Job has finished or is not found
-            break
-        elif [ "$job_status" = "RUNNING" ]; then
-            printf "running" > $status_file
-            break
-        fi
-        sleep 10
-    done
-}
-
-# Misc initializations.
-echo "========================"
-echo "START TIME: $(date)"
-source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
-conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
-echo python3 version = $(python3 --version)
-echo "========================"
-
-# Slurm stuff
-export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
-export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
-export MASTER_PORT=$((1024 + RANDOM % 64511))
-
-export TMPDIR=/scratch
-export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
-export CUBLAS_WORKSPACE_CONFIG=":4096:8"
-export CUDA_DEVICE_MAX_CONNECTIONS="1"
-
-huggingface-cli login --token $HUGGINGFACE_TOKEN
-
-
-NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
-CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/config.yaml"
-
-LAUNCHER="torchrun \
-    --nproc_per_node 8 \
-    --nnodes 2 \
-    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
-    --rdzv_backend c10d \
-    --max_restarts 0 \
-    --tee 3 \
-    --node_rank ${SLURM_PROCID}"
-
-# Checkout the bench_cluster branch
-cd $NANOTRON_REPO
-git checkout bench_cluster
-cd ..
-# Get the current job ID
-job_id=${SLURM_JOB_ID}
-
-# Update status to "pending" or "running" in the background
-update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt &
-
-# Run the main command
-srun -u $LAUNCHER $CMD
-exit_status=$?
-
-# Update status based on the exit status of `srun`
-if [ $exit_status -eq 0 ]; then
-    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt
-else
-    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt
-    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out; then
-        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt
-    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out; then
-        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt
-    else
-        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt
-    fi
-fi
-
-# Run the report script if the job completed successfully
-if [ $exit_status -eq 0 ]; then
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024 --is_logs
-    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024 --is_profiler
-fi
-
-
-# Push to hub the folder using huggingface_cli
-huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024 llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024 --commit-message "Upload llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024"
-
-# Verify the upload
-if [ $? -eq 0 ]; then
-    echo "Uploading to Huggingface Hub successful"
-else
-    echo "Failed to upload to Huggingface Hub"
-fi
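The comment inside update_status notes that the status file is never set to "pending". As a hedged aside, one way the loop could also record that state, assuming the same squeue interface (this helper is hypothetical and was not part of the deleted script):

# Sketch: also record the PENDING state that the original loop skips.
update_status_with_pending() {
    job_id=$1
    status_file=$2
    while true; do
        job_status=$(squeue --job "$job_id" --noheader --format=%T)
        case "$job_status" in
            "")       break ;;                              # job finished or no longer in the queue
            PENDING)  printf "pending" > "$status_file" ;;  # record pending, keep polling
            RUNNING)  printf "running" > "$status_file"; break ;;
        esac
        sleep 10
    done
}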
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/config.yaml
DELETED
@@ -1,90 +0,0 @@
-general:
-  project: bench_cluster
-  seed: 42
-model:
-  ddp_bucket_cap_mb: 25
-  dtype: bfloat16
-  init_method:
-    std: 0.025
-  make_vocab_size_divisible_by: 1
-  model_config:
-    bos_token_id: 1
-    eos_token_id: 2
-    hidden_act: silu
-    hidden_size: 2048
-    initializer_range: 0.02
-    intermediate_size: 4096
-    is_llama_config: true
-    max_position_embeddings: 4096
-    num_attention_heads: 32
-    num_hidden_layers: 24
-    num_key_value_heads: 32
-    pad_token_id: null
-    pretraining_tp: 1
-    rms_norm_eps: 1.0e-05
-    rope_scaling: null
-    rope_theta: 10000.0
-    tie_word_embeddings: true
-    use_cache: true
-    vocab_size: 50257
-optimizer:
-  accumulate_grad_in_fp32: true
-  clip_grad: 1.0
-  learning_rate_scheduler:
-    learning_rate: 0.0001
-    lr_decay_style: linear
-    lr_warmup_style: linear
-    lr_warmup_steps: 1
-    min_decay_lr: 1.0e-05
-  optimizer_factory:
-    adam_beta1: 0.9
-    adam_beta2: 0.95
-    adam_eps: 1.0e-08
-    name: adamW
-    torch_adam_is_fused: true
-  weight_decay: 0.01
-  zero_stage: 1
-parallelism:
-  dp: 1
-  expert_parallel_size: 1
-  pp: 1
-  pp_engine: 1f1b
-  tp: 16
-  tp_linear_async_communication: false
-  tp_mode: REDUCE_SCATTER
-profiler:
-  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024
-tokenizer:
-  tokenizer_max_length: null
-  tokenizer_name_or_path: openai-community/gpt2
-  tokenizer_revision: null
-data_stages:
-- name: Training Stage
-  start_training_step: 1
-  data:
-    dataset:
-      dataset_overwrite_cache: false
-      dataset_processing_num_proc_per_process: 64
-      hf_dataset_config_name: null
-      hf_dataset_or_datasets: roneneldan/TinyStories
-      hf_dataset_splits: train
-      text_column_name: text
-    num_loading_workers: 32
-    seed: 42
-lighteval: null
-tokens:
-  train_steps: 20
-  val_check_interval: -1
-  batch_accumulation_per_replica: 1
-  limit_test_batches: 0
-  limit_val_batches: 0
-  micro_batch_size: 1024
-  sequence_length: 4096
-logging:
-  iteration_step_info_interval: 1
-  log_level: info
-  log_level_replica: info
-checkpoints:
-  checkpoint_interval: 100000
-  checkpoints_path: /dev/null
-  resume_checkpoint_path: null
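The status.txt for this run (below) records an OOM; the size of each step follows directly from the tokens section of this config. A rough check, as a sketch using only values from the file above (the formula samples per step = micro_batch_size * batch_accumulation_per_replica * dp is an assumption about how nanotron combines them):

echo $((1024 * 1 * 1))         # 1024 samples per optimizer step
echo $((1024 * 1 * 1 * 4096))  # 4194304 tokens per optimizer step at sequence_length 4096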
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-1024/status.txt
DELETED
@@ -1 +0,0 @@
-oom
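Each removed directory carries one of these single-word status files. A small hypothetical helper, consistent with the result layout shown above but not part of the deleted files, for listing the recorded outcome of every combination:

for f in /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/*/status.txt; do
    # print "<combination directory>: <status>"
    printf '%s: %s\n' "$(basename "$(dirname "$f")")" "$(cat "$f")"
done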
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/bench.slurm
DELETED
@@ -1,111 +0,0 @@
(The deleted file is the same 111-line bench.slurm as in dp-1_tp-16_pp-1_mbz-1024 above; the only difference is that every occurrence of the results directory reads /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128 instead, in the #SBATCH --output/--error paths, the --config-file argument, the status.txt writes, the report commands, and the huggingface-cli upload.)
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/config.yaml
DELETED
@@ -1,90 +0,0 @@
(The deleted file is the same 90-line config.yaml as in dp-1_tp-16_pp-1_mbz-1024 above, except that profiler_export_path ends in dp-1_tp-16_pp-1_mbz-128, batch_accumulation_per_replica is 8, and micro_batch_size is 128.)
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-128/status.txt
DELETED
@@ -1 +0,0 @@
-oom
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/bench.slurm
DELETED
@@ -1,111 +0,0 @@
(The deleted file is the same 111-line bench.slurm as in dp-1_tp-16_pp-1_mbz-1024 above, with every occurrence of the results directory replaced by /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256.)
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/config.yaml
DELETED
@@ -1,90 +0,0 @@
(The deleted file is the same 90-line config.yaml as in dp-1_tp-16_pp-1_mbz-1024 above, except that profiler_export_path ends in dp-1_tp-16_pp-1_mbz-256, batch_accumulation_per_replica is 4, and micro_batch_size is 256.)
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-256/status.txt
DELETED
@@ -1 +0,0 @@
-oom
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/bench.slurm
DELETED
@@ -1,111 +0,0 @@
(The deleted file is the same 111-line bench.slurm as in dp-1_tp-16_pp-1_mbz-1024 above, with every occurrence of the results directory replaced by /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32.)
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/config.yaml
DELETED
@@ -1,90 +0,0 @@
(The deleted file is the same 90-line config.yaml as in dp-1_tp-16_pp-1_mbz-1024 above, except that profiler_export_path ends in dp-1_tp-16_pp-1_mbz-32, batch_accumulation_per_replica is 32, and micro_batch_size is 32.)
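Across the four configs shown in this commit the global batch stays constant; only the split between micro-batch size and gradient accumulation changes. A quick check using the values above (a sketch, assuming samples per step = micro_batch_size * batch_accumulation_per_replica with dp = 1):

echo $((1024 * 1))   # mbz-1024: 1024 samples per step
echo $((256 * 4))    # mbz-256:  1024 samples per step
echo $((128 * 8))    # mbz-128:  1024 samples per step
echo $((32 * 32))    # mbz-32:   1024 samples per step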
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/log.out
DELETED
@@ -1,550 +0,0 @@
-========================
-START TIME: Tue Jul 2 16:31:55 UTC 2024
-python3 version = Python 3.10.14
-========================
-The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
-Token is valid (permission: write).
-Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
-Login successful
-Already on 'bench_cluster'
-M examples/config_tiny_llama.py
-M examples/config_tiny_llama.yaml
-M examples/train_tiny_llama.sh
-M src/nanotron/models/llama.py
-M src/nanotron/trainer.py
-Your branch is up to date with 'origin/bench_cluster'.
-Job status: RUNNING
-W0702 16:31:57 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. (emitted by the torchrun launcher on each of the two nodes)

The rest of the deleted log echoes the resolved nanotron Config, which matches the config.yaml above with the vocabulary padded from 50257 to 50272 (15 dummy tokens) and lr_decay_steps resolved to 19, then records model building across the 16 TP ranks on ip-26-0-169-239 and ip-26-0-169-247: 1.11G total parameters (2119.44MiB), 69.4M parameters (132.46MiB) per rank, memory usage 159.71MiB after model building (peak allocated 174.02MiB, peak reserved 178.00MiB), no checkpoint path provided, StandardParametrizator, LearningRateForSP, and ZeRO-1 sharding with DP rank 0 holding 100% of the optimizer states. It then starts the single training stage (19 remaining steps, 0 samples consumed), loading the openai-community/gpt2 tokenizer with transformers/hf_hub versions ('4.41.2', '0.23.4') and the roneneldan/TinyStories dataset via the `datasets` library; the deleted file runs to 550 lines, the remainder of which is available in the raw diff.
|
191 |
-
[default0]:07/02/2024 16:32:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-169-239]: [Stage Training Stage] start from step 1
|
192 |
-
[default0]:07/02/2024 16:32:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-169-239]:
|
193 |
-
[default0]:07/02/2024 16:32:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-169-239]: [Start training] datetime: 2024-07-02 16:32:34.409740 | mbs: 32 | grad_accum: 32 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
|
194 |
-
[default0]:07/02/2024 16:32:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-169-239]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
|
195 |
-
[default0]:07/02/2024 16:32:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-169-239]: Memory usage: 689.57MiB. Peak allocated 689.57MiB. Peak reserved: 710.00MiB
|
196 |
-
[default4]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=4|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
197 |
-
[default2]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=2|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
198 |
-
[default2]:Repo card metadata block was not found. Setting CardData to empty.
|
199 |
-
[default0]:Repo card metadata block was not found. Setting CardData to empty.
|
200 |
-
[default5]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=13|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
201 |
-
[default2]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=10|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
202 |
-
[default1]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=9|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
203 |
-
[default4]:Repo card metadata block was not found. Setting CardData to empty.
|
204 |
-
[default0]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=8|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
205 |
-
[default3]:Repo card metadata block was not found. Setting CardData to empty.
|
206 |
-
[default1]:Repo card metadata block was not found. Setting CardData to empty.
|
207 |
-
[default5]:Repo card metadata block was not found. Setting CardData to empty.
|
208 |
-
[default2]:Repo card metadata block was not found. Setting CardData to empty.
|
209 |
-
[default3]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=3|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
210 |
-
[default1]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=1|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
211 |
-
[default5]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=5|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
212 |
-
[default7]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=7|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
213 |
-
[default3]:Repo card metadata block was not found. Setting CardData to empty.
|
214 |
-
[default1]:Repo card metadata block was not found. Setting CardData to empty.
|
215 |
-
[default6]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=6|ip-26-0-169-239]: Repo card metadata block was not found. Setting CardData to empty.
|
216 |
-
[default6]:Repo card metadata block was not found. Setting CardData to empty.
|
217 |
-
[default3]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=11|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
218 |
-
[default6]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=14|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
219 |
-
[default4]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=12|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
220 |
-
[default5]:Repo card metadata block was not found. Setting CardData to empty.
|
221 |
-
[default7]:Repo card metadata block was not found. Setting CardData to empty.
|
222 |
-
[default4]:Repo card metadata block was not found. Setting CardData to empty.
|
223 |
-
[default7]:07/02/2024 16:32:34 [WARNING|DP=0|PP=0|TP=15|ip-26-0-169-247]: Repo card metadata block was not found. Setting CardData to empty.
|
224 |
-
[default6]:Repo card metadata block was not found. Setting CardData to empty.
|
225 |
-
[default7]:Repo card metadata block was not found. Setting CardData to empty.
|
226 |
-
[default7]:[rank7]: OSError: [Errno 122] Disk quota exceeded
|
227 |
-
[default7]:
|
228 |
-
[default7]:[rank7]: During handling of the above exception, another exception occurred:
|
229 |
-
[default7]:
|
230 |
-
[default7]:[rank7]: Traceback (most recent call last):
|
231 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
|
232 |
-
[default7]:[rank7]: trainer.train(dataloader)
|
233 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
|
234 |
-
[default7]:[rank7]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
235 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
|
236 |
-
[default7]:[rank7]: outputs = self.pipeline_engine.train_batch_iter(
|
237 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
238 |
-
[default7]:[rank7]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
239 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
240 |
-
[default7]:[rank7]: output = model(**micro_batch)
|
241 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
242 |
-
[default7]:[rank7]: return self._call_impl(*args, **kwargs)
|
243 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
244 |
-
[default7]:[rank7]: return forward_call(*args, **kwargs)
|
245 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
|
246 |
-
[default7]:[rank7]: sharded_logits = self.model(
|
247 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
248 |
-
[default7]:[rank7]: return self._call_impl(*args, **kwargs)
|
249 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
250 |
-
[default7]:[rank7]: return forward_call(*args, **kwargs)
|
251 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
|
252 |
-
[default7]:[rank7]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
253 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
|
254 |
-
[default7]:[rank7]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
255 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
256 |
-
[default7]:[rank7]: return self._call_impl(*args, **kwargs)
|
257 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
258 |
-
[default7]:[rank7]: return forward_call(*args, **kwargs)
|
259 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
260 |
-
[default7]:[rank7]: output = self.pp_block(**new_kwargs)
|
261 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
262 |
-
[default7]:[rank7]: return self._call_impl(*args, **kwargs)
|
263 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
264 |
-
[default7]:[rank7]: return forward_call(*args, **kwargs)
|
265 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
|
266 |
-
[default7]:[rank7]: hidden_states = self.input_layernorm(hidden_states)
|
267 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
268 |
-
[default7]:[rank7]: return self._call_impl(*args, **kwargs)
|
269 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
270 |
-
[default7]:[rank7]: return forward_call(*args, **kwargs)
|
271 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
|
272 |
-
[default7]:[rank7]: return layer_norm_fn(
|
273 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
|
274 |
-
[default7]:[rank7]: return LayerNormFn.apply(
|
275 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
|
276 |
-
[default7]:[rank7]: return super().apply(*args, **kwargs) # type: ignore[misc]
|
277 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
|
278 |
-
[default7]:[rank7]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
|
279 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
|
280 |
-
[default7]:[rank7]: _layer_norm_fwd_1pass_kernel[(M,)](
|
281 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
|
282 |
-
[default7]:[rank7]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
|
283 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
|
284 |
-
[default7]:[rank7]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
|
285 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
|
286 |
-
[default7]:[rank7]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
|
287 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
|
288 |
-
[default7]:[rank7]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
|
289 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
|
290 |
-
[default7]:[rank7]: fn()
|
291 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
|
292 |
-
[default7]:[rank7]: self.fn.run(
|
293 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
294 |
-
[default7]:[rank7]: return self.fn.run(*args, **kwargs)
|
295 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
296 |
-
[default7]:[rank7]: return self.fn.run(*args, **kwargs)
|
297 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
298 |
-
[default7]:[rank7]: return self.fn.run(*args, **kwargs)
|
299 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
|
300 |
-
[default7]:[rank7]: self.cache[device][key] = compile(
|
301 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
|
302 |
-
[default7]:[rank7]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
|
303 |
-
[default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
|
304 |
-
[default7]:[rank7]: with open(temp_path, mode) as f:
|
305 |
-
[default7]:[rank7]: OSError: [Errno 122] Disk quota exceeded
|
306 |
-
[default4]:[rank4]: OSError: [Errno 122] Disk quota exceeded
|
307 |
-
[default4]:
|
308 |
-
[default4]:[rank4]: During handling of the above exception, another exception occurred:
|
309 |
-
[default4]:
|
310 |
-
[default4]:[rank4]: Traceback (most recent call last):
|
311 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
|
312 |
-
[default4]:[rank4]: trainer.train(dataloader)
|
313 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
|
314 |
-
[default4]:[rank4]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
315 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
|
316 |
-
[default4]:[rank4]: outputs = self.pipeline_engine.train_batch_iter(
|
317 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
318 |
-
[default4]:[rank4]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
319 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
320 |
-
[default4]:[rank4]: output = model(**micro_batch)
|
321 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
322 |
-
[default4]:[rank4]: return self._call_impl(*args, **kwargs)
|
323 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
324 |
-
[default4]:[rank4]: return forward_call(*args, **kwargs)
|
325 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
|
326 |
-
[default4]:[rank4]: sharded_logits = self.model(
|
327 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
328 |
-
[default4]:[rank4]: return self._call_impl(*args, **kwargs)
|
329 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
330 |
-
[default4]:[rank4]: return forward_call(*args, **kwargs)
|
331 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
|
332 |
-
[default4]:[rank4]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
333 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
|
334 |
-
[default4]:[rank4]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
335 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
336 |
-
[default4]:[rank4]: return self._call_impl(*args, **kwargs)
|
337 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
338 |
-
[default4]:[rank4]: return forward_call(*args, **kwargs)
|
339 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
340 |
-
[default4]:[rank4]: output = self.pp_block(**new_kwargs)
|
341 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
342 |
-
[default4]:[rank4]: return self._call_impl(*args, **kwargs)
|
343 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
344 |
-
[default4]:[rank4]: return forward_call(*args, **kwargs)
|
345 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
|
346 |
-
[default4]:[rank4]: hidden_states = self.input_layernorm(hidden_states)
|
347 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
348 |
-
[default4]:[rank4]: return self._call_impl(*args, **kwargs)
|
349 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
350 |
-
[default4]:[rank4]: return forward_call(*args, **kwargs)
|
351 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
|
352 |
-
[default4]:[rank4]: return layer_norm_fn(
|
353 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
|
354 |
-
[default4]:[rank4]: return LayerNormFn.apply(
|
355 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
|
356 |
-
[default4]:[rank4]: return super().apply(*args, **kwargs) # type: ignore[misc]
|
357 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
|
358 |
-
[default4]:[rank4]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
|
359 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
|
360 |
-
[default4]:[rank4]: _layer_norm_fwd_1pass_kernel[(M,)](
|
361 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
|
362 |
-
[default4]:[rank4]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
|
363 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
|
364 |
-
[default4]:[rank4]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
|
365 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
|
366 |
-
[default4]:[rank4]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
|
367 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
|
368 |
-
[default4]:[rank4]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
|
369 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
|
370 |
-
[default4]:[rank4]: fn()
|
371 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
|
372 |
-
[default4]:[rank4]: self.fn.run(
|
373 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
374 |
-
[default4]:[rank4]: return self.fn.run(*args, **kwargs)
|
375 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
376 |
-
[default4]:[rank4]: return self.fn.run(*args, **kwargs)
|
377 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
378 |
-
[default4]:[rank4]: return self.fn.run(*args, **kwargs)
|
379 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
|
380 |
-
[default4]:[rank4]: self.cache[device][key] = compile(
|
381 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
|
382 |
-
[default4]:[rank4]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
|
383 |
-
[default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
|
384 |
-
[default4]:[rank4]: with open(temp_path, mode) as f:
|
385 |
-
[default4]:[rank4]: OSError: [Errno 122] Disk quota exceeded
|
386 |
-
[default4]:[rank12]: OSError: [Errno 122] Disk quota exceeded
|
387 |
-
[default4]:
|
388 |
-
[default4]:[rank12]: During handling of the above exception, another exception occurred:
|
389 |
-
[default4]:
|
390 |
-
[default4]:[rank12]: Traceback (most recent call last):
|
391 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
|
392 |
-
[default4]:[rank12]: trainer.train(dataloader)
|
393 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
|
394 |
-
[default4]:[rank12]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
|
395 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
|
396 |
-
[default4]:[rank12]: outputs = self.pipeline_engine.train_batch_iter(
|
397 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
|
398 |
-
[default4]:[rank12]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
|
399 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
|
400 |
-
[default4]:[rank12]: output = model(**micro_batch)
|
401 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
402 |
-
[default4]:[rank12]: return self._call_impl(*args, **kwargs)
|
403 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
404 |
-
[default4]:[rank12]: return forward_call(*args, **kwargs)
|
405 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
|
406 |
-
[default4]:[rank12]: sharded_logits = self.model(
|
407 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
408 |
-
[default4]:[rank12]: return self._call_impl(*args, **kwargs)
|
409 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
410 |
-
[default4]:[rank12]: return forward_call(*args, **kwargs)
|
411 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
|
412 |
-
[default4]:[rank12]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
|
413 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
|
414 |
-
[default4]:[rank12]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
|
415 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
416 |
-
[default4]:[rank12]: return self._call_impl(*args, **kwargs)
|
417 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
418 |
-
[default4]:[rank12]: return forward_call(*args, **kwargs)
|
419 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
|
420 |
-
[default4]:[rank12]: output = self.pp_block(**new_kwargs)
|
421 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
422 |
-
[default4]:[rank12]: return self._call_impl(*args, **kwargs)
|
423 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
424 |
-
[default4]:[rank12]: return forward_call(*args, **kwargs)
|
425 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
|
426 |
-
[default4]:[rank12]: hidden_states = self.input_layernorm(hidden_states)
|
427 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
|
428 |
-
[default4]:[rank12]: return self._call_impl(*args, **kwargs)
|
429 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
|
430 |
-
[default4]:[rank12]: return forward_call(*args, **kwargs)
|
431 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
|
432 |
-
[default4]:[rank12]: return layer_norm_fn(
|
433 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
|
434 |
-
[default4]:[rank12]: return LayerNormFn.apply(
|
435 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
|
436 |
-
[default4]:[rank12]: return super().apply(*args, **kwargs) # type: ignore[misc]
|
437 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
|
438 |
-
[default4]:[rank12]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
|
439 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
|
440 |
-
[default4]:[rank12]: _layer_norm_fwd_1pass_kernel[(M,)](
|
441 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
|
442 |
-
[default4]:[rank12]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
|
443 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
|
444 |
-
[default4]:[rank12]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
|
445 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
|
446 |
-
[default4]:[rank12]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
|
447 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
|
448 |
-
[default4]:[rank12]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
|
449 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
|
450 |
-
[default4]:[rank12]: fn()
|
451 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
|
452 |
-
[default4]:[rank12]: self.fn.run(
|
453 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
454 |
-
[default4]:[rank12]: return self.fn.run(*args, **kwargs)
|
455 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
456 |
-
[default4]:[rank12]: return self.fn.run(*args, **kwargs)
|
457 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
|
458 |
-
[default4]:[rank12]: return self.fn.run(*args, **kwargs)
|
459 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
|
460 |
-
[default4]:[rank12]: self.cache[device][key] = compile(
|
461 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
|
462 |
-
[default4]:[rank12]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
|
463 |
-
[default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
|
464 |
-
[default4]:[rank12]: with open(temp_path, mode) as f:
|
465 |
-
[default4]:[rank12]: OSError: [Errno 122] Disk quota exceeded
|
466 |
-
W0702 16:32:48.847000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2427255 closing signal SIGTERM
|
467 |
-
W0702 16:32:48.852000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2427256 closing signal SIGTERM
|
468 |
-
W0702 16:32:48.854000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2427257 closing signal SIGTERM
|
469 |
-
W0702 16:32:48.862000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99623 closing signal SIGTERM
|
470 |
-
W0702 16:32:48.863000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2427258 closing signal SIGTERM
|
471 |
-
W0702 16:32:48.866000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99624 closing signal SIGTERM
|
472 |
-
W0702 16:32:48.871000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99625 closing signal SIGTERM
|
473 |
-
W0702 16:32:48.875000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99626 closing signal SIGTERM
|
474 |
-
W0702 16:32:48.878000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99628 closing signal SIGTERM
|
475 |
-
W0702 16:32:48.880000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99629 closing signal SIGTERM
|
476 |
-
W0702 16:32:48.882000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 99630 closing signal SIGTERM
|
477 |
-
W0702 16:32:48.891000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2427260 closing signal SIGTERM
|
478 |
-
W0702 16:32:48.894000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2427261 closing signal SIGTERM
|
479 |
-
E0702 16:32:51.295000 140386122716992 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 4 (pid: 2427259) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
|
480 |
-
Traceback (most recent call last):
|
481 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
|
482 |
-
sys.exit(main())
|
483 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
|
484 |
-
return f(*args, **kwargs)
|
485 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
|
486 |
-
run(args)
|
487 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
|
488 |
-
elastic_launch(
|
489 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
|
490 |
-
return launch_agent(self._config, self._entrypoint, list(args))
|
491 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
|
492 |
-
raise ChildFailedError(
|
493 |
-
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
494 |
-
============================================================
|
495 |
-
/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
|
496 |
-
------------------------------------------------------------
|
497 |
-
Failures:
|
498 |
-
[1]:
|
499 |
-
time : 2024-07-02_16:32:48
|
500 |
-
host : ip-26-0-169-239.ec2.internal
|
501 |
-
rank : 7 (local_rank: 7)
|
502 |
-
exitcode : 1 (pid: 2427262)
|
503 |
-
error_file: <N/A>
|
504 |
-
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
505 |
-
------------------------------------------------------------
|
506 |
-
Root Cause (first observed failure):
|
507 |
-
[0]:
|
508 |
-
time : 2024-07-02_16:32:48
|
509 |
-
host : ip-26-0-169-239.ec2.internal
|
510 |
-
rank : 4 (local_rank: 4)
|
511 |
-
exitcode : 1 (pid: 2427259)
|
512 |
-
error_file: <N/A>
|
513 |
-
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
514 |
-
============================================================
|
515 |
-
E0702 16:32:51.480000 139796830594880 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 4 (pid: 99627) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
|
516 |
-
W0702 16:32:51.487000 139796830594880 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-169-247.ec2.internal_99554_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
|
517 |
-
W0702 16:32:51.515000 139796830594880 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-169-247.ec2.internal_99554_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
|
518 |
-
W0702 16:32:51.525000 139796830594880 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-169-247.ec2.internal_99554_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
|
519 |
-
Traceback (most recent call last):
|
520 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
|
521 |
-
sys.exit(main())
|
522 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
|
523 |
-
return f(*args, **kwargs)
|
524 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
|
525 |
-
run(args)
|
526 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
|
527 |
-
elastic_launch(
|
528 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
|
529 |
-
return launch_agent(self._config, self._entrypoint, list(args))
|
530 |
-
File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
|
531 |
-
raise ChildFailedError(
|
532 |
-
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
|
533 |
-
============================================================
|
534 |
-
/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
|
535 |
-
------------------------------------------------------------
|
536 |
-
Failures:
|
537 |
-
<NO_OTHER_FAILURES>
|
538 |
-
------------------------------------------------------------
|
539 |
-
Root Cause (first observed failure):
|
540 |
-
[0]:
|
541 |
-
time : 2024-07-02_16:32:48
|
542 |
-
host : ip-26-0-169-247.ec2.internal
|
543 |
-
rank : 12 (local_rank: 4)
|
544 |
-
exitcode : 1 (pid: 99627)
|
545 |
-
error_file: <N/A>
|
546 |
-
traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
|
547 |
-
============================================================
|
548 |
-
srun: error: ip-26-0-169-239: task 0: Exited with exit code 1
|
549 |
-
srun: error: ip-26-0-169-247: task 1: Exited with exit code 1
|
550 |
-
Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-32/status.txt
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
fail
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/bench.slurm
DELETED
@@ -1,111 +0,0 @@
|
|
1 |
-
#!/bin/bash
|
2 |
-
|
3 |
-
#SBATCH --job-name=bench_cluster
|
4 |
-
#SBATCH --time=00:59:00
|
5 |
-
#SBATCH --partition=hopper-prod
|
6 |
-
#SBATCH --nodes=2
|
7 |
-
#SBATCH --gres=gpu:8
|
8 |
-
#SBATCH --qos=high
|
9 |
-
#SBATCH --ntasks-per-node=1
|
10 |
-
#SBATCH --cpus-per-task=96
|
11 |
-
#SBATCH --exclusive
|
12 |
-
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out
|
13 |
-
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out
|
14 |
-
|
15 |
-
# Function to update status based on squeue output
|
16 |
-
update_status() {
|
17 |
-
job_id=$1
|
18 |
-
status_file=$2
|
19 |
-
# For unknown reasons, it doesn't update the status for pending jobs; it only works for running ones
|
20 |
-
while true; do
|
21 |
-
job_status=$(squeue --job $job_id --noheader --format=%T)
|
22 |
-
echo "Job status: $job_status"
|
23 |
-
if [ -z "$job_status" ]; then
|
24 |
-
# Job has finished or is not found
|
25 |
-
break
|
26 |
-
elif [ "$job_status" = "RUNNING" ]; then
|
27 |
-
printf "running" > $status_file
|
28 |
-
break
|
29 |
-
fi
|
30 |
-
sleep 10
|
31 |
-
done
|
32 |
-
}
|
33 |
-
|
34 |
-
# Misc initializations.
|
35 |
-
echo "========================"
|
36 |
-
echo "START TIME: $(date)"
|
37 |
-
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
|
38 |
-
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
|
39 |
-
echo python3 version = $(python3 --version)
|
40 |
-
echo "========================"
|
41 |
-
|
42 |
-
# Slurm stuff
|
43 |
-
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
|
44 |
-
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
|
45 |
-
export MASTER_PORT=$((1024 + RANDOM % 64511))
|
46 |
-
|
47 |
-
export TMPDIR=/scratch
|
48 |
-
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
|
49 |
-
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
|
50 |
-
export CUDA_DEVICE_MAX_CONNECTIONS="1"
|
51 |
-
|
52 |
-
huggingface-cli login --token $HUGGINGFACE_TOKEN
|
53 |
-
|
54 |
-
|
55 |
-
NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
|
56 |
-
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/config.yaml"
|
57 |
-
|
58 |
-
LAUNCHER="torchrun \
|
59 |
-
--nproc_per_node 8 \
|
60 |
-
--nnodes 2 \
|
61 |
-
--rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
|
62 |
-
--rdzv_backend c10d \
|
63 |
-
--max_restarts 0 \
|
64 |
-
--tee 3 \
|
65 |
-
--node_rank ${SLURM_PROCID}"
|
66 |
-
|
67 |
-
# Checkout the bench_cluster branch
|
68 |
-
cd $NANOTRON_REPO
|
69 |
-
git checkout bench_cluster
|
70 |
-
cd ..
|
71 |
-
# Get the current job ID
|
72 |
-
job_id=${SLURM_JOB_ID}
|
73 |
-
|
74 |
-
# Update status to "pending" or "running" in the background
|
75 |
-
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt &
|
76 |
-
|
77 |
-
# Run the main command
|
78 |
-
srun -u $LAUNCHER $CMD
|
79 |
-
exit_status=$?
|
80 |
-
|
81 |
-
# Update status based on the exit status of `srun`
|
82 |
-
if [ $exit_status -eq 0 ]; then
|
83 |
-
printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt
|
84 |
-
else
|
85 |
-
if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out; then
|
86 |
-
printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt
|
87 |
-
elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out; then
|
88 |
-
printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt
|
89 |
-
elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out; then
|
90 |
-
printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt
|
91 |
-
else
|
92 |
-
printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt
|
93 |
-
fi
|
94 |
-
fi
|
95 |
-
|
96 |
-
# Run the report script if the job completed successfully
|
97 |
-
if [ $exit_status -eq 0 ]; then
|
98 |
-
python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64 --is_logs
|
99 |
-
python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64 --is_profiler
|
100 |
-
fi
|
101 |
-
|
102 |
-
|
103 |
-
# Push the results folder to the Hugging Face Hub using huggingface-cli
|
104 |
-
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64 llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64 --commit-message "Upload llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64"
|
105 |
-
|
106 |
-
# Verify the upload
|
107 |
-
if [ $? -eq 0 ]; then
|
108 |
-
echo "Uploading to Huggingface Hub successful"
|
109 |
-
else
|
110 |
-
echo "Failed to upload to Huggingface Hub"
|
111 |
-
fi
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/config.yaml
DELETED
@@ -1,90 +0,0 @@
|
|
1 |
-
general:
|
2 |
-
project: bench_cluster
|
3 |
-
seed: 42
|
4 |
-
model:
|
5 |
-
ddp_bucket_cap_mb: 25
|
6 |
-
dtype: bfloat16
|
7 |
-
init_method:
|
8 |
-
std: 0.025
|
9 |
-
make_vocab_size_divisible_by: 1
|
10 |
-
model_config:
|
11 |
-
bos_token_id: 1
|
12 |
-
eos_token_id: 2
|
13 |
-
hidden_act: silu
|
14 |
-
hidden_size: 2048
|
15 |
-
initializer_range: 0.02
|
16 |
-
intermediate_size: 4096
|
17 |
-
is_llama_config: true
|
18 |
-
max_position_embeddings: 4096
|
19 |
-
num_attention_heads: 32
|
20 |
-
num_hidden_layers: 24
|
21 |
-
num_key_value_heads: 32
|
22 |
-
pad_token_id: null
|
23 |
-
pretraining_tp: 1
|
24 |
-
rms_norm_eps: 1.0e-05
|
25 |
-
rope_scaling: null
|
26 |
-
rope_theta: 10000.0
|
27 |
-
tie_word_embeddings: true
|
28 |
-
use_cache: true
|
29 |
-
vocab_size: 50257
|
30 |
-
optimizer:
|
31 |
-
accumulate_grad_in_fp32: true
|
32 |
-
clip_grad: 1.0
|
33 |
-
learning_rate_scheduler:
|
34 |
-
learning_rate: 0.0001
|
35 |
-
lr_decay_style: linear
|
36 |
-
lr_warmup_style: linear
|
37 |
-
lr_warmup_steps: 1
|
38 |
-
min_decay_lr: 1.0e-05
|
39 |
-
optimizer_factory:
|
40 |
-
adam_beta1: 0.9
|
41 |
-
adam_beta2: 0.95
|
42 |
-
adam_eps: 1.0e-08
|
43 |
-
name: adamW
|
44 |
-
torch_adam_is_fused: true
|
45 |
-
weight_decay: 0.01
|
46 |
-
zero_stage: 1
|
47 |
-
parallelism:
|
48 |
-
dp: 1
|
49 |
-
expert_parallel_size: 1
|
50 |
-
pp: 1
|
51 |
-
pp_engine: 1f1b
|
52 |
-
tp: 16
|
53 |
-
tp_linear_async_communication: false
|
54 |
-
tp_mode: REDUCE_SCATTER
|
55 |
-
profiler:
|
56 |
-
profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64
|
57 |
-
tokenizer:
|
58 |
-
tokenizer_max_length: null
|
59 |
-
tokenizer_name_or_path: openai-community/gpt2
|
60 |
-
tokenizer_revision: null
|
61 |
-
data_stages:
|
62 |
-
- name: Training Stage
|
63 |
-
start_training_step: 1
|
64 |
-
data:
|
65 |
-
dataset:
|
66 |
-
dataset_overwrite_cache: false
|
67 |
-
dataset_processing_num_proc_per_process: 64
|
68 |
-
hf_dataset_config_name: null
|
69 |
-
hf_dataset_or_datasets: roneneldan/TinyStories
|
70 |
-
hf_dataset_splits: train
|
71 |
-
text_column_name: text
|
72 |
-
num_loading_workers: 32
|
73 |
-
seed: 42
|
74 |
-
lighteval: null
|
75 |
-
tokens:
|
76 |
-
train_steps: 20
|
77 |
-
val_check_interval: -1
|
78 |
-
batch_accumulation_per_replica: 16
|
79 |
-
limit_test_batches: 0
|
80 |
-
limit_val_batches: 0
|
81 |
-
micro_batch_size: 64
|
82 |
-
sequence_length: 4096
|
83 |
-
logging:
|
84 |
-
iteration_step_info_interval: 1
|
85 |
-
log_level: info
|
86 |
-
log_level_replica: info
|
87 |
-
checkpoints:
|
88 |
-
checkpoint_interval: 100000
|
89 |
-
checkpoints_path: /dev/null
|
90 |
-
resume_checkpoint_path: null
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/log.out
DELETED
The diff for this file is too large to render.
See raw diff
llama-1B/16_GPUS/dp-1_tp-16_pp-1_mbz-64/status.txt
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
oom