diff --git "a/attnserver.run_attnserver.slurm.sh.343200.err.log" "b/attnserver.run_attnserver.slurm.sh.343200.err.log" --- "a/attnserver.run_attnserver.slurm.sh.343200.err.log" +++ "b/attnserver.run_attnserver.slurm.sh.343200.err.log" @@ -49117,3 +49117,1387 @@ W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:18:18.988000 3367613 site-packages/torch/distributed/run.py:766] ***************************************** +[rank24]:[W621 21:18:42.554357258 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank8]:[W621 21:18:42.035138652 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 21:18:42.391868585 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 21:18:42.402904442 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank30]:[W621 21:18:42.811481175 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank14]:[W621 21:18:42.290816965 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank22]:[W621 21:18:42.747563206 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank21]:[W621 21:18:42.763113843 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 21:18:42.424303197 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. 
+[rank29]:[W621 21:18:42.833437618 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank13]:[W621 21:18:42.313957413 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank16]:[W621 21:18:42.787467682 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank2]:[W621 21:18:42.450942546 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank10]:[W621 21:18:42.338323109 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank4]:[W621 21:18:42.454002011 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank12]:[W621 21:18:42.341462752 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank28]:[W621 21:18:42.863060268 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank18]:[W621 21:18:42.794697809 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank20]:[W621 21:18:42.795253105 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank31]:[W621 21:18:42.864798824 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank7]:[W621 21:18:42.456925159 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank26]:[W621 21:18:42.865945000 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank1]:[W621 21:18:42.461739285 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank27]:[W621 21:18:42.870247700 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank23]:[W621 21:18:42.802087846 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank25]:[W621 21:18:42.871020249 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank15]:[W621 21:18:42.350285282 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank11]:[W621 21:18:42.351569140 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank19]:[W621 21:18:42.805579574 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank9]:[W621 21:18:42.352847405 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank3]:[W621 21:18:42.466109828 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank17]:[W621 21:18:42.808308185 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
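The block above is the generic ProcessGroupNCCL notice that the rank-to-GPU mapping was never declared to the process group. A minimal sketch of the fix the warning itself suggests, assuming a torchrun-style launch where LOCAL_RANK identifies each process's GPU (the snippet is illustrative and not taken from the training script in this log):

```python
import os

import torch
import torch.distributed as dist

# Bind this rank to its local GPU before creating the process group.
local_rank = int(os.environ.get("LOCAL_RANK", "0"))
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)

# Passing device_id makes the rank-to-GPU mapping explicit, which is what the
# "device used by this process is currently unknown" warning asks for and
# avoids the potential hang it describes.
dist.init_process_group(backend="nccl", device_id=device)
```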
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
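The UserWarning above comes from passing the deprecated fp8 argument to the layer-spec helper. A hedged sketch of the update the warning asks for, assuming the Megatron-core API used in this repository (whether fp8 should instead be configured elsewhere, e.g. on the transformer config, depends on the Megatron-core version, so treat this as an assumption rather than the project's actual fix):

```python
from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_with_transformer_engine_spec

# Deprecated call that triggers the UserWarning above (fp8 passed directly):
# layer_spec = get_gpt_layer_with_transformer_engine_spec(fp8="hybrid")

# Updated call: build the layer spec without the deprecated fp8 argument and
# let the training configuration own the fp8 settings instead.
layer_spec = get_gpt_layer_with_transformer_engine_spec()
```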
+[rank22]: Traceback (most recent call last):
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank22]: pretrain(
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain
+[rank22]: iteration, num_floating_point_operations_so_far = train(
+[rank22]: ^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train
+[rank22]: ) = train_step(
+[rank22]: ^^^^^^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step
+[rank22]: losses_reduced = forward_backward_func(
+[rank22]: ^^^^^^^^^^^^^^^^^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining
+[rank22]: output_tensor, num_tokens = forward_step(
+[rank22]: ^^^^^^^^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step
+[rank22]: output_tensor, loss_func = forward_step_func(data_iterator, model)
+[rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+[rank22]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+[rank22]: ^^^^^^^^^^^^^^^^^^^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+[rank22]: batch = next(global_batches)
+[rank22]: ^^^^^^^^^^^^^^^^^^^^
+[rank22]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+[rank22]: attention_mask = torch.ones(
+[rank22]: ^^^^^^^^^^^
+[rank22]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank21]: Traceback (most recent call last): +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank21]: pretrain( +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank21]: iteration, num_floating_point_operations_so_far = train( +[rank21]: ^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank21]: ) = train_step( +[rank21]: ^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank21]: losses_reduced = forward_backward_func( +[rank21]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank21]: output_tensor, num_tokens = forward_step( +[rank21]: ^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank21]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank21]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank21]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank21]: batch = next(global_batches) +[rank21]: ^^^^^^^^^^^^^^^^^^^^ +[rank21]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank21]: attention_mask = torch.ones( +[rank21]: ^^^^^^^^^^^ +[rank21]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank19]: Traceback (most recent call last): +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank19]: pretrain( +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank19]: iteration, num_floating_point_operations_so_far = train( +[rank19]: ^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank19]: ) = train_step( +[rank19]: ^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank19]: losses_reduced = forward_backward_func( +[rank19]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank19]: output_tensor, num_tokens = forward_step( +[rank19]: ^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank19]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank19]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank19]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: Traceback (most recent call last): +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank6]: pretrain( +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank6]: iteration, num_floating_point_operations_so_far = train( +[rank6]: ^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank6]: ) = train_step( +[rank6]: ^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank6]: losses_reduced = forward_backward_func( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank19]: batch = next(global_batches) +[rank19]: ^^^^^^^^^^^^^^^^^^^^ +[rank19]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank19]: attention_mask = torch.ones( +[rank19]: ^^^^^^^^^^^ +[rank19]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank6]: output_tensor, num_tokens = forward_step( +[rank6]: ^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank6]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank6]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank6]: batch = next(global_batches) +[rank6]: ^^^^^^^^^^^^^^^^^^^^ +[rank30]: Traceback (most recent call last): +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank30]: pretrain( +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank30]: iteration, num_floating_point_operations_so_far = train( +[rank30]: ^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank30]: ) = train_step( +[rank30]: ^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank30]: losses_reduced = forward_backward_func( +[rank30]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank6]: attention_mask = torch.ones( +[rank6]: ^^^^^^^^^^^ +[rank6]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank30]: output_tensor, num_tokens = forward_step( +[rank30]: ^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank30]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank30]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank30]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: Traceback (most recent call last): +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank7]: pretrain( +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank7]: iteration, num_floating_point_operations_so_far = train( +[rank7]: ^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank7]: ) = train_step( +[rank7]: ^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank7]: losses_reduced = forward_backward_func( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank30]: batch = next(global_batches) +[rank30]: ^^^^^^^^^^^^^^^^^^^^ +[rank30]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank30]: attention_mask = torch.ones( +[rank30]: ^^^^^^^^^^^ +[rank30]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank7]: output_tensor, num_tokens = forward_step( +[rank7]: ^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank7]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank7]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank7]: batch = next(global_batches) +[rank7]: ^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank7]: attention_mask = torch.ones( +[rank7]: ^^^^^^^^^^^ +[rank7]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank3]: Traceback (most recent call last): +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank3]: pretrain( +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank3]: iteration, num_floating_point_operations_so_far = train( +[rank3]: ^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank3]: ) = train_step( +[rank3]: ^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank3]: losses_reduced = forward_backward_func( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank3]: output_tensor, num_tokens = forward_step( +[rank3]: ^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank3]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank3]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank3]: batch = next(global_batches) +[rank3]: ^^^^^^^^^^^^^^^^^^^^ +[rank29]: Traceback (most recent call last): +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank29]: pretrain( 
+[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank29]: iteration, num_floating_point_operations_so_far = train( +[rank29]: ^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank29]: ) = train_step( +[rank29]: ^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank29]: losses_reduced = forward_backward_func( +[rank29]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: Traceback (most recent call last): +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank14]: pretrain( +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank14]: iteration, num_floating_point_operations_so_far = train( +[rank14]: ^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank14]: ) = train_step( +[rank14]: ^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank14]: losses_reduced = forward_backward_func( +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank3]: attention_mask = torch.ones( +[rank3]: ^^^^^^^^^^^ +[rank3]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank29]: output_tensor, num_tokens = forward_step( +[rank29]: ^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank29]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank29]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank29]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank14]: output_tensor, num_tokens = forward_step( +[rank14]: ^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank14]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank14]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank14]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank29]: batch = next(global_batches) +[rank29]: ^^^^^^^^^^^^^^^^^^^^ +[rank29]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank29]: attention_mask = torch.ones( +[rank29]: ^^^^^^^^^^^ +[rank29]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank14]: batch = next(global_batches) +[rank14]: ^^^^^^^^^^^^^^^^^^^^ +[rank14]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank14]: attention_mask = torch.ones( +[rank14]: ^^^^^^^^^^^ +[rank14]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank31]: Traceback (most recent call last): +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank31]: pretrain( +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank31]: iteration, num_floating_point_operations_so_far = train( +[rank31]: ^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank31]: ) = train_step( +[rank31]: ^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank31]: losses_reduced = forward_backward_func( +[rank31]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank20]: Traceback (most recent call last): +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank20]: pretrain( +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank20]: iteration, num_floating_point_operations_so_far = train( +[rank20]: ^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank20]: ) = train_step( +[rank20]: ^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank20]: losses_reduced = forward_backward_func( +[rank20]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: Traceback (most recent call last): +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank13]: pretrain( +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank13]: iteration, num_floating_point_operations_so_far = train( +[rank13]: ^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank13]: ) = train_step( +[rank13]: ^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank13]: losses_reduced = forward_backward_func( +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank31]: output_tensor, num_tokens = forward_step( +[rank31]: ^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank31]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank31]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank31]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank20]: output_tensor, num_tokens = forward_step( +[rank20]: ^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step 
+[rank20]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank20]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank20]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank13]: output_tensor, num_tokens = forward_step( +[rank13]: ^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank31]: batch = next(global_batches) +[rank31]: ^^^^^^^^^^^^^^^^^^^^ +[rank31]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank31]: attention_mask = torch.ones( +[rank31]: ^^^^^^^^^^^ +[rank31]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank20]: batch = next(global_batches) +[rank20]: ^^^^^^^^^^^^^^^^^^^^ +[rank20]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank20]: attention_mask = torch.ones( +[rank20]: ^^^^^^^^^^^ +[rank20]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank13]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank13]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank13]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank13]: batch = next(global_batches) +[rank13]: ^^^^^^^^^^^^^^^^^^^^ +[rank13]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank13]: attention_mask = torch.ones( +[rank13]: ^^^^^^^^^^^ +[rank27]: Traceback (most recent call last): +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank27]: pretrain( +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank27]: iteration, num_floating_point_operations_so_far = train( +[rank27]: ^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank27]: ) = train_step( +[rank27]: ^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank27]: losses_reduced = forward_backward_func( +[rank27]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank13]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank12]: Traceback (most recent call last): +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank12]: pretrain( +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank12]: iteration, num_floating_point_operations_so_far = train( +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank27]: output_tensor, num_tokens = forward_step( +[rank27]: ^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank27]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank27]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank27]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: ^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank12]: ) = train_step( +[rank12]: ^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank12]: losses_reduced = forward_backward_func( +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank12]: output_tensor, num_tokens = forward_step( +[rank12]: ^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank12]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank2]: Traceback (most recent call last): +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank2]: pretrain( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank2]: iteration, num_floating_point_operations_so_far = train( +[rank2]: ^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank2]: ) = train_step( +[rank2]: ^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank2]: losses_reduced = forward_backward_func( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank27]: batch = next(global_batches) +[rank27]: ^^^^^^^^^^^^^^^^^^^^ +[rank27]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank27]: attention_mask = torch.ones( +[rank27]: ^^^^^^^^^^^ +[rank27]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.17 GiB is free. 
Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank12]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank12]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank12]: batch = next(global_batches) +[rank12]: ^^^^^^^^^^^^^^^^^^^^ +[rank12]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank12]: attention_mask = torch.ones( +[rank12]: ^^^^^^^^^^^ +[rank2]: output_tensor, num_tokens = forward_step( +[rank2]: ^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank2]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank2]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank2]: batch = next(global_batches) +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank23]: Traceback (most recent call last): +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank23]: pretrain( +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank23]: iteration, num_floating_point_operations_so_far = train( +[rank23]: ^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank23]: ) = train_step( +[rank23]: ^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank23]: losses_reduced = forward_backward_func( +[rank23]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank12]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank2]: attention_mask = torch.ones( +[rank2]: ^^^^^^^^^^^ +[rank2]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. 
GPU 2 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank23]: output_tensor, num_tokens = forward_step( +[rank23]: ^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank23]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank23]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank23]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank23]: batch = next(global_batches) +[rank23]: ^^^^^^^^^^^^^^^^^^^^ +[rank23]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank23]: attention_mask = torch.ones( +[rank23]: ^^^^^^^^^^^ +[rank23]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank17]: Traceback (most recent call last): +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank17]: pretrain( +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank17]: iteration, num_floating_point_operations_so_far = train( +[rank17]: ^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank17]: ) = train_step( +[rank17]: ^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank17]: losses_reduced = forward_backward_func( +[rank17]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank17]: output_tensor, num_tokens = forward_step( +[rank17]: ^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank17]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank17]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank17]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: Traceback (most recent call last): +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank8]: pretrain( +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank8]: iteration, num_floating_point_operations_so_far = train( +[rank8]: ^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank8]: ) = train_step( +[rank8]: ^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank8]: losses_reduced = forward_backward_func( +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank4]: Traceback (most recent call last): +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank4]: pretrain( +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank4]: iteration, num_floating_point_operations_so_far = train( +[rank4]: ^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank4]: ) = train_step( +[rank4]: ^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank4]: losses_reduced = forward_backward_func( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank17]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank17]: batch = next(global_batches) +[rank17]: ^^^^^^^^^^^^^^^^^^^^ +[rank17]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank17]: attention_mask = torch.ones( +[rank17]: ^^^^^^^^^^^ +[rank17]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank8]: output_tensor, num_tokens = forward_step( +[rank8]: ^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank8]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank8]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank8]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank8]: batch = next(global_batches) +[rank8]: ^^^^^^^^^^^^^^^^^^^^ +[rank4]: output_tensor, num_tokens = forward_step( +[rank4]: ^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank4]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank4]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank4]: batch = next(global_batches) +[rank4]: ^^^^^^^^^^^^^^^^^^^^ +[rank25]: Traceback (most recent call last): +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank25]: pretrain( +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank25]: iteration, num_floating_point_operations_so_far = train( +[rank25]: ^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank25]: ) = train_step( +[rank25]: ^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank25]: losses_reduced = forward_backward_func( +[rank25]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: Traceback (most recent call last): +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank18]: pretrain( +[rank18]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank18]: iteration, num_floating_point_operations_so_far = train( +[rank18]: ^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank18]: ) = train_step( +[rank18]: ^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank18]: losses_reduced = forward_backward_func( +[rank18]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank8]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank8]: attention_mask = torch.ones( +[rank8]: ^^^^^^^^^^^ +[rank8]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank4]: attention_mask = torch.ones( +[rank4]: ^^^^^^^^^^^ +[rank4]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank25]: output_tensor, num_tokens = forward_step( +[rank25]: ^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank25]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank25]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank25]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank18]: output_tensor, num_tokens = forward_step( +[rank18]: ^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank10]: Traceback (most recent call last): +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank10]: pretrain( +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank10]: iteration, num_floating_point_operations_so_far = train( +[rank10]: ^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank10]: ) = train_step( +[rank10]: ^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank10]: losses_reduced = forward_backward_func( +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: Traceback (most recent call last): +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank5]: pretrain( +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank5]: iteration, num_floating_point_operations_so_far = train( +[rank5]: ^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank5]: ) = train_step( +[rank5]: ^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank5]: losses_reduced = forward_backward_func( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank25]: batch = next(global_batches) +[rank25]: ^^^^^^^^^^^^^^^^^^^^ +[rank25]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank25]: attention_mask = torch.ones( +[rank25]: ^^^^^^^^^^^ +[rank25]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. 
Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank18]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank18]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank18]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank18]: batch = next(global_batches) +[rank18]: ^^^^^^^^^^^^^^^^^^^^ +[rank18]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank18]: attention_mask = torch.ones( +[rank18]: ^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank10]: output_tensor, num_tokens = forward_step( +[rank10]: ^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank10]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank10]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank10]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: output_tensor, num_tokens = forward_step( +[rank5]: ^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank5]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank5]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank5]: batch = next(global_batches) +[rank5]: ^^^^^^^^^^^^^^^^^^^^ +[rank28]: Traceback (most recent call last): +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank28]: pretrain( +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank28]: iteration, num_floating_point_operations_so_far = train( +[rank28]: ^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank28]: ) = train_step( +[rank28]: ^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank28]: losses_reduced = forward_backward_func( +[rank28]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank18]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. 
GPU 2 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank10]: batch = next(global_batches) +[rank10]: ^^^^^^^^^^^^^^^^^^^^ +[rank10]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank10]: attention_mask = torch.ones( +[rank10]: ^^^^^^^^^^^ +[rank10]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 131.14 GiB is free. Including non-PyTorch memory, this process has 8.67 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank5]: attention_mask = torch.ones( +[rank5]: ^^^^^^^^^^^ +[rank5]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank28]: output_tensor, num_tokens = forward_step( +[rank28]: ^^^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank28]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank28]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank28]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank16]: Traceback (most recent call last): +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank16]: pretrain( +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank16]: iteration, num_floating_point_operations_so_far = train( +[rank16]: ^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank16]: ) = train_step( +[rank16]: ^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank16]: losses_reduced = forward_backward_func( +[rank16]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: Traceback (most recent call last): +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank15]: pretrain( +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank15]: iteration, num_floating_point_operations_so_far = train( +[rank15]: ^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank15]: ) = train_step( +[rank15]: ^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank15]: losses_reduced = forward_backward_func( +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: Traceback (most recent call last): +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank1]: pretrain( +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank1]: iteration, num_floating_point_operations_so_far = train( +[rank1]: ^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank1]: ) = train_step( +[rank1]: ^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank1]: losses_reduced = forward_backward_func( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank28]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank28]: batch = next(global_batches) +[rank28]: ^^^^^^^^^^^^^^^^^^^^ +[rank28]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank28]: attention_mask = torch.ones( +[rank28]: ^^^^^^^^^^^ +[rank28]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank16]: output_tensor, num_tokens = forward_step( +[rank16]: ^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank16]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank16]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank16]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank15]: output_tensor, num_tokens = forward_step( +[rank15]: ^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank15]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank15]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank15]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: output_tensor, num_tokens = forward_step( +[rank1]: ^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank1]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank1]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank1]: batch = next(global_batches) +[rank1]: ^^^^^^^^^^^^^^^^^^^^ +[rank26]: Traceback (most recent call last): +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank26]: pretrain( +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank26]: iteration, num_floating_point_operations_so_far = train( +[rank26]: ^^^^^^ +[rank26]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank26]: ) = train_step( +[rank26]: ^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank26]: losses_reduced = forward_backward_func( +[rank26]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank16]: batch = next(global_batches) +[rank16]: ^^^^^^^^^^^^^^^^^^^^ +[rank16]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank16]: attention_mask = torch.ones( +[rank16]: ^^^^^^^^^^^ +[rank16]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank15]: batch = next(global_batches) +[rank15]: ^^^^^^^^^^^^^^^^^^^^ +[rank15]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank15]: attention_mask = torch.ones( +[rank15]: ^^^^^^^^^^^ +[rank15]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank1]: attention_mask = torch.ones( +[rank1]: ^^^^^^^^^^^ +[rank1]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank26]: output_tensor, num_tokens = forward_step( +[rank26]: ^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank26]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank26]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank26]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: Traceback (most recent call last): +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank11]: pretrain( +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank11]: iteration, num_floating_point_operations_so_far = train( +[rank11]: ^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank11]: ) = train_step( +[rank11]: ^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank11]: losses_reduced = forward_backward_func( +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: Traceback (most recent call last): +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank0]: pretrain( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank0]: iteration, num_floating_point_operations_so_far = train( +[rank0]: ^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank0]: ) = train_step( +[rank0]: ^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank0]: losses_reduced = forward_backward_func( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank26]: batch = next(global_batches) +[rank26]: ^^^^^^^^^^^^^^^^^^^^ +[rank26]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank26]: attention_mask = torch.ones( +[rank26]: ^^^^^^^^^^^ +[rank26]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank11]: output_tensor, num_tokens = forward_step( +[rank11]: ^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank11]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank11]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank11]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: output_tensor, num_tokens = forward_step( +[rank0]: ^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank0]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank0]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank0]: batch = next(global_batches) +[rank0]: ^^^^^^^^^^^^^^^^^^^^ +[rank24]: Traceback (most recent call last): +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank24]: pretrain( +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank24]: iteration, num_floating_point_operations_so_far = train( +[rank24]: ^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank24]: ) = train_step( +[rank24]: ^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank24]: losses_reduced = forward_backward_func( +[rank24]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank11]: batch = next(global_batches) +[rank11]: ^^^^^^^^^^^^^^^^^^^^ +[rank11]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank11]: attention_mask = torch.ones( +[rank11]: ^^^^^^^^^^^ +[rank11]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank0]: attention_mask = torch.ones( +[rank0]: ^^^^^^^^^^^ +[rank0]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.17 GiB is free. Including non-PyTorch memory, this process has 8.63 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank24]: output_tensor, num_tokens = forward_step( +[rank24]: ^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank24]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank24]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank24]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: Traceback (most recent call last): +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank9]: pretrain( +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 863, in pretrain +[rank9]: iteration, num_floating_point_operations_so_far = train( +[rank9]: ^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 2229, in train +[rank9]: ) = train_step( +[rank9]: ^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1382, in train_step +[rank9]: losses_reduced = forward_backward_func( +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 518, in forward_backward_no_pipelining +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank24]: batch = next(global_batches) +[rank24]: ^^^^^^^^^^^^^^^^^^^^ +[rank24]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank24]: attention_mask = torch.ones( +[rank24]: ^^^^^^^^^^^ +[rank24]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.16 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank9]: output_tensor, num_tokens = forward_step( +[rank9]: ^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/pipeline_parallel/schedules.py", line 289, in forward_step +[rank9]: output_tensor, loss_func = forward_step_func(data_iterator, model) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step +[rank9]: (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator) +[rank9]: ^^^^^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch +[rank9]: batch = next(global_batches) +[rank9]: ^^^^^^^^^^^^^^^^^^^^ +[rank9]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches +[rank9]: attention_mask = torch.ones( +[rank9]: ^^^^^^^^^^^ +[rank9]: torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 524288.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.15 GiB is free. Including non-PyTorch memory, this process has 8.65 GiB memory in use. Of the allocated memory 7.02 GiB is allocated by PyTorch, and 111.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +[rank3]:[W621 21:18:57.492985540 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 21:18:57.514744847 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 21:18:57.606304392 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 21:18:57.612537187 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 21:18:57.623552814 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 21:18:57.628497419 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank11]:[W621 21:18:57.516178219 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank21]:[W621 21:18:57.982950308 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank22]:[W621 21:18:57.988431638 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank12]:[W621 21:18:57.561029732 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank10]:[W621 21:18:57.569799373 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank23]:[W621 21:18:57.052838784 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank17]:[W621 21:18:57.066515913 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 21:18:57.747437094 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank14]:[W621 21:18:57.646743323 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank27]:[W621 21:18:57.259225503 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank31]:[W621 21:18:57.272514379 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank28]:[W621 21:18:57.304177683 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank30]:[W621 21:18:57.309551879 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank19]:[W621 21:18:57.241249471 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank15]:[W621 21:18:57.801823915 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank18]:[W621 21:18:57.264390643 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank29]:[W621 21:18:57.333811722 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank26]:[W621 21:18:57.353360486 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank13]:[W621 21:18:57.832855414 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank20]:[W621 21:18:57.286132690 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank9]:[W621 21:18:57.845499305 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank25]:[W621 21:18:57.367318431 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +W0621 21:18:58.751000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067009 closing signal SIGTERM +W0621 21:18:58.753000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067010 closing signal SIGTERM +W0621 21:18:58.754000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067011 closing signal SIGTERM +W0621 21:18:58.755000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067012 closing signal SIGTERM +W0621 21:18:58.755000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067013 closing signal SIGTERM +W0621 21:18:58.755000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067014 closing signal SIGTERM +W0621 21:18:58.755000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2067015 closing signal SIGTERM +W0621 21:18:58.775000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147926 closing signal SIGTERM +W0621 21:18:58.778000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147927 closing signal SIGTERM +W0621 21:18:58.779000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147928 closing signal SIGTERM +W0621 21:18:58.779000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147929 closing signal SIGTERM +W0621 21:18:58.779000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147930 closing signal SIGTERM +W0621 21:18:58.779000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147933 closing signal SIGTERM +W0621 21:18:58.780000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 147934 closing signal SIGTERM +W0621 21:18:58.802000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436449 closing signal SIGTERM +W0621 21:18:58.804000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436450 closing signal SIGTERM +W0621 21:18:58.805000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436452 closing signal SIGTERM +W0621 21:18:58.806000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436453 closing signal SIGTERM +W0621 21:18:58.806000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436454 closing signal SIGTERM +W0621 21:18:58.806000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436455 closing signal SIGTERM +W0621 21:18:58.807000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3436456 closing signal SIGTERM +W0621 21:18:58.806000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3367683 closing signal SIGTERM +W0621 21:18:58.809000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3367684 closing signal SIGTERM +W0621 21:18:58.809000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] 
+W0621 21:18:58.810000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3367686 closing signal SIGTERM
+W0621 21:18:58.810000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3367687 closing signal SIGTERM
+W0621 21:18:58.810000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3367690 closing signal SIGTERM
+E0621 21:18:59.071000 147855 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 147931) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+ File "", line 198, in _run_module_as_main
+ File "", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+ return arg(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+ raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+ time : 2025-06-21_21:18:58
+ host : fs-mbz-gpu-852
+ rank : 5 (local_rank: 5)
+ exitcode : 1 (pid: 147931)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+E0621 21:18:59.184000 2066939 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 2067016) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+E0621 21:18:59.185000 3436378 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 3436451) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+W0621 21:18:59.195000 2066939 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2066939_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
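The report above points at https://pytorch.org/docs/stable/elastic/errors.html because error_file is empty for the failing rank. Wrapping the entrypoint with the record decorator from torch.distributed.elastic is the documented way to get each child's traceback written to that file; a minimal sketch (not the project's own training script) follows.

```python
# Minimal sketch: @record makes torchelastic capture the failing rank's
# traceback and propagate it into the error_file / summary shown above.
from torch.distributed.elastic.multiprocessing.errors import record

@record
def main() -> None:
    ...  # whatever ./pretrain_gpt_profile.py does on each rank

if __name__ == "__main__":
    main()
```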
+[W621 21:18:59.635175327 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:54376, remote=[fs-mbz-gpu-852]:29500): Broken pipe
+Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x152a5c5785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x152a4545aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa358 (0x152a4545c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #3: + 0x5babb3e (0x152a4545db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x152a45457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x152a45457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+[W621 21:18:59.114490531 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:55556, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes
+Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x154a281785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x154a1145aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa0d0 (0x154a1145c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x152a45458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #7: + 0xc0f526 (0x152a5478b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #8: + 0x37f17d (0x152a53efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #26: + 0x29d90 (0x152a5d5add90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #27: __libc_start_main + 0x80 (0x152a5d5ade40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+frame #3: + 0x5baa81d (0x154a1145c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: + 0x5bab4a9 (0x154a1145d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x154a114574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #6: + 0xc0f919 (0x154a2078b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+E0621 21:18:59.203000 3367613 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 5 (pid: 3367688) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+frame #7: + 0x37f17d (0x154a1fefb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #25: + 0x29d90 (0x154a294f1d90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #26: __libc_start_main + 0x80 (0x154a294f1e40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+W0621 21:18:59.207000 2066939 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2066939_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+W0621 21:18:59.208000 3436378 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3436378_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+[W621 21:18:59.646254020 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=4, addr=[fs-mbz-gpu-901]:54376, remote=[fs-mbz-gpu-852]:29500): Broken pipe
+Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x152a5c5785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x152a4545aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa358 (0x152a4545c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #3: + 0x5babb3e (0x152a4545db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: c10d::TCPStore::doWait(c10::ArrayRef, std::allocator > >, std::chrono::duration >) + 0x1a6 (0x152a45457ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string, std::allocator > const&) + 0x33 (0x152a45457ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #6: c10d::TCPStore::get(std::__cxx11::basic_string, std::allocator > const&) + 0xab (0x152a45458f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #7: + 0xc0f526 (0x152a5478b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #8: + 0x37f17d (0x152a53efb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #26: + 0x29d90 (0x152a5d5add90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #27: __libc_start_main + 0x80 (0x152a5d5ade40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+[W621 21:18:59.127001947 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:55556, remote=[fs-mbz-gpu-852]:29500): Broken pipe
+Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x154a281785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x154a1145aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa358 (0x154a1145c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #3: + 0x5babb3e (0x154a1145db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x154a11457569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: + 0xc0f919 (0x154a2078b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #6: + 0x37f17d (0x154a1fefb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #24: + 0x29d90 (0x154a294f1d90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #25: __libc_start_main + 0x80 (0x154a294f1e40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+W0621 21:18:59.217000 2066939 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-901_2066939_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+Traceback (most recent call last):
+ File "", line 198, in _run_module_as_main
+ File "", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+ return arg(*args, **kwargs)
+W0621 21:18:59.218000 3436378 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3436378_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+[W621 21:18:59.585911183 TCPStore.cpp:115] [c10d] recvVector failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:36364, remote=[fs-mbz-gpu-852]:29500): failed to recv, got 0 bytes
+Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1505e37785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x1505cc65aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa0d0 (0x1505cc65c0d0 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+ time : 2025-06-21_21:18:58
+ host : fs-mbz-gpu-901
+ rank : 31 (local_rank: 7)
+ exitcode : 1 (pid: 2067016)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+frame #3: + 0x5baa81d (0x1505cc65c81d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: + 0x5bab4a9 (0x1505cc65d4a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x1fb (0x1505cc6574cb in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #6: + 0xc0f919 (0x1505db98b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #7: + 0x37f17d (0x1505db0fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #25: + 0x29d90 (0x1505e4777d90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #26: __libc_start_main + 0x80 (0x1505e4777e40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+[W621 21:18:59.136369568 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-870]:55556, remote=[fs-mbz-gpu-852]:29500): Broken pipe
+Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x154a281785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x154a1145aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa358 (0x154a1145c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #3: + 0x5babb3e (0x154a1145db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x154a11457569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: + 0xc0f919 (0x154a2078b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #6: + 0x37f17d (0x154a1fefb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #24: + 0x29d90 (0x154a294f1d90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #25: __libc_start_main + 0x80 (0x154a294f1e40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+W0621 21:18:59.226000 3367613 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3367613_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+W0621 21:18:59.227000 3436378 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-870_3436378_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
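The sendBytes/recvVector failures and the RendezvousConnectionError messages above all target the same remote, fs-mbz-gpu-852:29500, which hosts the c10d TCPStore used for the rendezvous; once the agent on that node exits after the first child failure, the surviving nodes' store operations (doGet, compareSet) hit a dead socket. The sketch below only illustrates that client/server relationship; "localhost" and the "status" key are placeholders, not values from this job.

```python
# Illustrative sketch of the c10d TCPStore client/server pair behind the
# doGet()/compareSet() frames above; not code from the launcher itself.
from datetime import timedelta
from torch.distributed import TCPStore

# One process acts as the store server (in this log: the node fs-mbz-gpu-852,
# listening on port 29500); everyone else connects as a client.
server = TCPStore("localhost", 29500, is_master=True,
                  timeout=timedelta(seconds=30), wait_for_workers=False)
client = TCPStore("localhost", 29500, is_master=False,
                  timeout=timedelta(seconds=30))

client.set("status", "ok")   # request is served by the server process
print(client.get("status"))  # b'ok'
# If the server process goes away first, subsequent get()/compare_set() calls
# from the other nodes fail with "Broken pipe" / "failed to recv, got 0 bytes",
# and the rendezvous shutdown then logs RendezvousConnectionError.
```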
+Traceback (most recent call last):
+ File "", line 198, in _run_module_as_main
+ File "", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[W621 21:18:59.598417592 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:36364, remote=[fs-mbz-gpu-852]:29500): Broken pipe
+Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1505e37785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x1505cc65aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa358 (0x1505cc65c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ return arg(*args, **kwargs)
+frame #3: + 0x5babb3e (0x1505cc65db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x1505cc657569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: + 0xc0f919 (0x1505db98b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #6: + 0x37f17d (0x1505db0fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+frame #24: + 0x29d90 (0x1505e4777d90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #25: __libc_start_main + 0x80 (0x1505e4777e40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+ raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+ time : 2025-06-21_21:18:58
+ host : fs-mbz-gpu-870
+ rank : 10 (local_rank: 2)
+ exitcode : 1 (pid: 3436451)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
+W0621 21:18:59.236000 3367613 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3367613_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+[W621 21:18:59.607419381 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-881]:36364, remote=[fs-mbz-gpu-852]:29500): Broken pipe
+Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string, std::allocator >) + 0x98 (0x1505e37785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+frame #1: + 0x5ba8afe (0x1505cc65aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #2: + 0x5baa358 (0x1505cc65c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #3: + 0x5babb3e (0x1505cc65db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #4: c10d::TCPStore::compareSet(std::__cxx11::basic_string, std::allocator > const&, std::vector > const&, std::vector > const&) + 0x299 (0x1505cc657569 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+frame #5: + 0xc0f919 (0x1505db98b919 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+frame #6: + 0x37f17d (0x1505db0fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+
+frame #24: + 0x29d90 (0x1505e4777d90 in /lib/x86_64-linux-gnu/libc.so.6)
+frame #25: __libc_start_main + 0x80 (0x1505e4777e40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+W0621 21:18:59.245000 3367613 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-881_3367613_0' has failed to shutdown the rendezvous '343200' due to an error of type RendezvousConnectionError.
+Traceback (most recent call last):
+ File "", line 198, in _run_module_as_main
+ File "", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+ return arg(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+ raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+[1]:
+ time : 2025-06-21_21:18:58
+ host : fs-mbz-gpu-881
+ rank : 22 (local_rank: 6)
+ exitcode : 1 (pid: 3367689)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+ time : 2025-06-21_21:18:58
+ host : fs-mbz-gpu-881
+ rank : 21 (local_rank: 5)
+ exitcode : 1 (pid: 3367688)
+ error_file:
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ set +x
++ set +x
++ set +x