Upload llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8
- .gitattributes +1 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/bench.slurm +111 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/config.yaml +90 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out +0 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log_metrics.csv +21 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler.csv +2 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler/ip-26-0-160-225_1190124.1719935492562546823.pt.trace.json +3 -0
- llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt +1 -0
.gitattributes
CHANGED
@@ -39,3 +39,4 @@ llama-1B/16_GPUS/dp-1_tp-1_pp-16_mbz-1/profiler/ip-26-0-161-178_157959.171993139
 llama-1B/16_GPUS/dp-1_tp-2_pp-8_mbz-2/profiler/ip-26-0-163-134_1435141.1719931469116122110.pt.trace.json filter=lfs diff=lfs merge=lfs -text
 llama-1B/16_GPUS/dp-8_tp-1_pp-2_mbz-4/profiler/ip-26-0-171-56_3064380.1719933802382173616.pt.trace.json filter=lfs diff=lfs merge=lfs -text
 llama-1B/16_GPUS/dp-4_tp-1_pp-4_mbz-2/profiler/ip-26-0-171-56_3084719.1719934470978269987.pt.trace.json filter=lfs diff=lfs merge=lfs -text
+llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler/ip-26-0-160-225_1190124.1719935492562546823.pt.trace.json filter=lfs diff=lfs merge=lfs -text
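Each of these entries pins one profiler trace to Git LFS. For reference, a line in exactly this form is what `git lfs track` appends to .gitattributes when given a literal path; a minimal sketch of how the new entry would have been produced, run from the repo root before committing:

# Sketch: register the ~10 GB trace with LFS so it is stored as a pointer file.
git lfs track "llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler/ip-26-0-160-225_1190124.1719935492562546823.pt.trace.json"
git add .gitattributes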
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/bench.slurm
ADDED
@@ -0,0 +1,111 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=00:59:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=2
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doesn't update the status for pending jobs; it only works for running ones.
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN

NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 2 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..

# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8 --is_profiler
fi

# Push the results folder to the Hub using huggingface-cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8 llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8 --commit-message "Upload llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
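The script reduces each run to a single word in status.txt (running, completed, oom, timeout, or fail), which makes the whole sweep easy to audit. A minimal sketch, assuming the same results layout as the paths above:

# Sketch: print the outcome of every 16-GPU run in the sweep.
for f in /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/*/status.txt; do
    printf '%s: %s\n' "$(basename "$(dirname "$f")")" "$(cat "$f")"
done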
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/config.yaml
ADDED
@@ -0,0 +1,90 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 2
  pp_engine: 1f1b
  tp: 8
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 32
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 128
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 8
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
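The parallelism and token settings are consistent with the 16-GPU allocation in bench.slurm, and the tokens consumed per step follow directly from the batch settings. A quick sanity check, with the values copied from the config above:

# GPUs needed = dp * tp * pp
echo $((1 * 8 * 2))            # 16, i.e. 2 nodes x 8 GPUs
# tokens per step = dp * micro_batch_size * batch_accumulation_per_replica * sequence_length
echo $((1 * 8 * 128 * 4096))   # 4194304, matching consumed_tokens ~4.19M after iteration 1 below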
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log.out
ADDED
The diff for this file is too large to render; see the raw diff.
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/log_metrics.csv
ADDED
@@ -0,0 +1,21 @@
iteration,consumed_tokens,elapsed_time_per_iteration_ms,tokens_per_sec,tokens_per_sec_per_gpu,global_batch_size,lm_loss,lr,model_tflops_per_gpu,hardware_tflops_per_gpu,grad_norm,memory_usage_MiB,peak_allocated_MiB,peak_reserved_MiB
1,4190000.0000000005,42000.0,99900.0,6240.0,1020.0,11.2,0.0001,56.6,56.6,12.1,1572.43,16549.94,16842.0
2,8390000.0,18500.0,227000.0,14200.0,1020.0,11.2,9.53e-05,129.0,129.0,12.2,1572.43,1572.47,16842.0
3,12600000.0,18600.0,225000.0,14100.0,1020.0,10.0,9.05e-05,128.0,128.0,51.7,1572.43,16549.94,16842.0
4,16800000.0,20400.0,206000.0,12900.0,1020.0,11.7,8.58e-05,117.0,117.0,18.3,1572.43,1572.47,16842.0
5,21000000.0,20400.0,205000.0,12800.0,1020.0,10.4,8.11e-05,116.0,116.0,16.0,1572.43,16549.94,16842.0
6,25200000.0,20400.0,205000.0,12800.0,1020.0,9.9,7.63e-05,117.0,117.0,9.06,1572.43,16549.94,16842.0
7,29400000.0,399000.0,10500.0,657.0,1020.0,9.37,7.16e-05,5.96,5.96,6.23,1572.43,16549.94,16842.0
8,33600000.0,18300.0,229000.0,14300.0,1020.0,8.89,6.68e-05,130.0,130.0,5.76,,,
9,37700000.0,18200.0,230000.0,14400.0,1020.0,8.81,6.21e-05,131.0,131.0,11.2,1572.43,16549.94,16842.0
10,41900000.0,17900.0,234000.0,14600.0,1020.0,8.34,5.74e-05,133.0,133.0,5.72,1572.43,16549.94,16842.0
11,46100000.0,18000.0,233000.0,14600.0,1020.0,8.06,5.26e-05,132.0,132.0,4.91,1572.43,16549.94,16842.0
12,50300000.0,18200.0,231000.0,14400.0,1020.0,7.9,4.79e-05,131.0,131.0,4.86,1572.43,16549.94,16842.0
13,54500000.0,17900.0,234000.0,14600.0,1020.0,7.75,4.32e-05,133.0,133.0,4.69,1572.43,16549.94,16842.0
14,58700000.0,18800.0,223000.0,13900.0,1020.0,7.62,3.84e-05,126.0,126.0,4.69,1572.43,16549.94,16842.0
15,62900000.0,18200.0,231000.0,14400.0,1020.0,7.48,3.37e-05,131.0,131.0,4.49,1572.43,16549.94,16842.0
16,67099999.99999999,17900.0,234000.0,14600.0,1020.0,7.35,2.89e-05,133.0,133.0,3.99,1572.43,16549.94,16842.0
17,71300000.0,18000.0,233000.0,14500.0,1020.0,7.23,2.42e-05,132.0,132.0,3.54,,,
18,75500000.0,18000.0,234000.0,14600.0,1020.0,7.16,1.95e-05,133.0,133.0,3.28,1572.43,16549.94,16842.0
19,79700000.0,18200.0,231000.0,14400.0,1020.0,7.09,1.47e-05,131.0,131.0,3.2,1572.43,16549.94,16842.0
20,83900000.0,18700.0,224000.0,14000.0,1020.0,7.03,1e-05,127.0,127.0,3.1,,,
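Iteration 1 (warmup) and iteration 7 (likely the step captured by the profiler, given its 399 s elapsed time) are outliers; the remaining steps sit around 14k tokens/sec/GPU. A one-line sketch for averaging the steady-state per-GPU throughput, assuming column 5 is tokens_per_sec_per_gpu as in the header:

# Sketch: mean tokens/sec/GPU, excluding the warmup (iter 1) and the slow profiled step (iter 7).
awk -F, 'NR > 1 && $1 != 1 && $1 != 7 { sum += $5; n++ } END { printf "%.0f\n", sum / n }' log_metrics.csv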
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler.csv
ADDED
@@ -0,0 +1,2 @@
forward,backward
1ms 74μs,0ms 533μs
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler/ip-26-0-160-225_1190124.1719935492562546823.pt.trace.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8aa2342444d7c23d4bad0f99a42cf1b0d520a80492c9f27359945191f6012e0a
size 10576635127
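This is a standard Git LFS pointer: the ~10.6 GB trace itself lives in LFS storage, keyed by the sha256 oid, and only this small stub is versioned in git. A sketch for materializing just this file on a fresh clone, using an include filter to avoid downloading the rest of the sweep:

# Sketch: fetch only this run's profiler trace from LFS.
git lfs pull --include="llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/profiler/*.pt.trace.json"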
llama-1B/16_GPUS/dp-1_tp-8_pp-2_mbz-8/status.txt
ADDED
@@ -0,0 +1 @@
completed