diff --git "a/attnserver.run_attnserver.slurm.sh.343207.err.log" "b/attnserver.run_attnserver.slurm.sh.343207.err.log" --- "a/attnserver.run_attnserver.slurm.sh.343207.err.log" +++ "b/attnserver.run_attnserver.slurm.sh.343207.err.log" @@ -139,3 +139,3522 @@ W0621 21:19:26.076000 1511074 site-packages/torch/distributed/run.py:766] W0621 21:19:26.076000 1511074 site-packages/torch/distributed/run.py:766] ***************************************** W0621 21:19:26.076000 1511074 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. W0621 21:19:26.076000 1511074 site-packages/torch/distributed/run.py:766] ***************************************** +[rank5]:[W621 21:19:54.766674400 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 21:19:55.922773799 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 21:19:55.924810213 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 21:19:55.924828218 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 21:19:55.933231302 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 21:19:55.933312920 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 21:19:55.933410313 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 21:19:55.938675731 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
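
[Editor's note] Each of the eight ranks emits the same fp8 deprecation warning from gpt_layer_specs.py. A minimal sketch of the call-site change the warning asks for, assuming fp8 precision is configured through the model's TransformerConfig instead (a hypothetical usage, not Megatron's committed fix):

from megatron.core.models.gpt.gpt_layer_specs import (
    get_gpt_layer_with_transformer_engine_spec,
)

# Before (warns): get_gpt_layer_with_transformer_engine_spec(..., fp8="hybrid")
# After: drop the deprecated fp8 argument entirely.
layer_spec = get_gpt_layer_with_transformer_engine_spec(
    num_experts=None,
    moe_grouped_gemm=False,
    qk_layernorm=False,
)
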
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+[rank0]: Traceback (most recent call last):
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank0]:     pretrain(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
+[rank0]:     save_checkpoint(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
+[rank0]:     async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
+[rank0]:                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 386, in save
+[rank0]:     common_strategy.save_common(state_dict, checkpoint_dir)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/common.py", line 48, in save_common
+[rank0]:     torch.save(common_state_dict, path)
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 964, in save
+[rank0]:     with _open_zipfile_writer(f) as opened_zipfile:
+[rank0]:          ^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 828, in _open_zipfile_writer
+[rank0]:     return container(name_or_buffer)
+[rank0]:            ^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 792, in __init__
+[rank0]:     torch._C.PyTorchFileWriter(
+[rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
+[rank0]:[W621 21:20:44.859505786 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
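
[Editor's note] The first failure is mundane: torch.save() in save_common() assumes gpt-checkpoint/iter_0000010/ already exists, and nothing created it. A minimal guard reusing the names from the traceback above (a sketch, not Megatron's actual patch):

import os

import torch

def save_common(common_state_dict, path):
    # torch.save() does not create missing parent directories, so make sure
    # the per-iteration checkpoint directory exists before opening the file.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    torch.save(common_state_dict, path)
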
+W0621 21:20:47.984000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511147 closing signal SIGTERM
+W0621 21:20:47.985000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511148 closing signal SIGTERM
+W0621 21:20:47.989000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511149 closing signal SIGTERM
+W0621 21:20:47.992000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511150 closing signal SIGTERM
+W0621 21:20:47.994000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511151 closing signal SIGTERM
+W0621 21:20:47.997000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511152 closing signal SIGTERM
+W0621 21:20:47.999000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1511153 closing signal SIGTERM
+E0621 21:20:49.530000 1511074 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 1511146) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time      : 2025-06-21_21:20:47
+  host      : fs-mbz-gpu-661
+  rank      : 0 (local_rank: 0)
+  exitcode  : 1 (pid: 1511146)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=2048
++ PROF_CTX_LENGTH=2048
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=2048, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:20:52.788000 1513484 site-packages/torch/distributed/run.py:766]
+W0621 21:20:52.788000 1513484 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:20:52.788000 1513484 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:20:52.788000 1513484 site-packages/torch/distributed/run.py:766] *****************************************
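
[Editor's note] The FutureWarning above comes from the launcher itself: torch.distributed.launch is deprecated in favor of torchrun, which behaves as if --use-env were set. The script-side change is to stop parsing --local-rank; a minimal sketch:

import os

# With torchrun, the local rank arrives through the environment rather than
# as a --local-rank CLI argument:
local_rank = int(os.environ["LOCAL_RANK"])

# The launch command changes accordingly, e.g. (same arguments as above):
#   torchrun --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 \
#            --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 \
#            ./pretrain_gpt_profile.py ...
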
+[rank6]:[W621 21:21:15.976620557 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank0]:[W621 21:21:15.001452049 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank7]:[W621 21:21:15.006737434 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank4]:[W621 21:21:15.009693737 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank3]:[W621 21:21:15.016113795 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank5]:[W621 21:21:15.018537241 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank2]:[W621 21:21:15.021311076 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank1]:[W621 21:21:15.025918466 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
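
[Editor's note] The DeprecationWarning comes from Transformer Engine's CPU offload path: offload_weights=True is now a no-op. A minimal sketch of the call-site change, assuming the offload context is obtained via transformer_engine.pytorch.get_cpu_offload_context (parameter names may vary with the installed TE version):

from transformer_engine.pytorch import get_cpu_offload_context

# Before: offload_weights=True triggers the warning and has no effect anyway.
# After: request activation offloading only.
offload_context, sync_func = get_cpu_offload_context(
    enabled=True,
    num_layers=1,             # assumption: tune to the offload window
    model_layers=2,           # assumption: matches --num-layers 2 above
    offload_activations=True,
    offload_weights=False,
)
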
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
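
[Editor's note] The two FutureWarnings above are the same migration seen from both sides: torch.distributed.checkpoint renamed load_state_dict to load, and ShardedTensor is being replaced by DTensor. A sketch of the call-site update for strategies/torch.py:915, with stand-in values for what Megatron already passes:

import torch.distributed.checkpoint as dcp
from torch.distributed.checkpoint import DefaultLoadPlanner, FileSystemReader

checkpoint_dir = "gpt-checkpoint/iter_0000010"  # stand-in path
sharded_state_dict = {}                         # stand-in: Megatron's sharded state dict

# Deprecated spelling (what triggers the warning):
#   dcp.load_state_dict(state_dict=sharded_state_dict, storage_reader=..., planner=...)
# Current spelling:
dcp.load(
    state_dict=sharded_state_dict,
    storage_reader=FileSystemReader(checkpoint_dir),
    planner=DefaultLoadPlanner(),
)
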
+[rank1]: Traceback (most recent call last):
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank1]:     pretrain(
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
+[rank1]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
+[rank1]:                                             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
+[rank1]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
+[rank1]:                                                                 ^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
+[rank1]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
+[rank1]:                                                       ^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
+[rank1]:     return _load_global_dist_base_checkpoint(
+[rank1]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
+[rank1]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
+[rank1]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
+[rank1]:                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
+[rank1]:     checkpoint.load_state_dict(
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[rank1]:     return arg(*args, **kwargs)
+[rank1]:            ^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
+[rank1]:     return _load_state_dict(
+[rank1]:            ^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
+[rank1]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
+[rank1]:                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank1]:     raise result
+[rank1]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank1]: Traceback (most recent call last): (RANK 0)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 1)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 2)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 3)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 4)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 5)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 6)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank1]: Traceback (most recent call last): (RANK 7)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:                  ^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]:     raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+
+[rank5]: Traceback (most recent call last):
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank5]:     pretrain(
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
+[rank5]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
+[rank5]:                                             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
+[rank5]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
+[rank5]:                                                                 ^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
+[rank5]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
+[rank5]:                                                       ^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
+[rank5]:     return _load_global_dist_base_checkpoint(
+[rank5]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
+[rank5]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
+[rank5]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
+[rank5]:                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
+[rank5]:     checkpoint.load_state_dict(
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[rank5]:     return arg(*args, **kwargs)
+[rank5]:            ^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
+[rank5]:     return _load_state_dict(
+[rank5]:            ^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
+[rank5]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
+[rank5]:                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank5]:     raise result
+[rank5]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank5]: Traceback (most recent call last): (RANK 0)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 1)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 2)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 3)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 4)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 5)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 6)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank5]: Traceback (most recent call last): (RANK 7)
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank5]:     local_data = map_fun()
+[rank5]:                  ^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank5]:     result = func(*args, **kwargs)
+[rank5]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank5]:     local_plan = planner.create_local_plan()
+[rank5]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank5]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank5]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank5]:     raise CheckpointingException(_msg)
+[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+
+[rank7]: Traceback (most recent call last):
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank7]:     pretrain(
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
+[rank7]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
+[rank7]:                                             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
+[rank7]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
+[rank7]:                                                                 ^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
+[rank7]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
+[rank7]:                                                       ^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
+[rank7]:     return _load_global_dist_base_checkpoint(
+[rank7]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
+[rank7]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
+[rank7]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
+[rank7]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
+[rank7]:                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
+[rank7]:     checkpoint.load_state_dict(
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[rank7]:     return arg(*args, **kwargs)
+[rank7]:            ^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
+[rank7]:     return _load_state_dict(
+[rank7]:            ^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
+[rank7]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
+[rank7]:                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank7]:     raise result
+[rank7]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank7]: Traceback (most recent call last): (RANK 0)
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank7]:     local_data = map_fun()
+[rank7]:                  ^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank7]:     result = func(*args, **kwargs)
+[rank7]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank7]:     local_plan = planner.create_local_plan()
+[rank7]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank7]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank7]:     raise CheckpointingException(_msg)
+[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank7]: Traceback (most recent call last): (RANK 1)
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank7]:     local_data = map_fun()
+[rank7]:                  ^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank7]:     result = func(*args, **kwargs)
+[rank7]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank7]:     local_plan = planner.create_local_plan()
+[rank7]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank7]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank7]:     raise CheckpointingException(_msg)
+[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank7]: Traceback (most recent call last): (RANK 2)
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank7]:     local_data = map_fun()
+[rank7]:                  ^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank7]:     result = func(*args, **kwargs)
+[rank7]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank7]:     local_plan = planner.create_local_plan()
+[rank7]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank7]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank7]:     raise CheckpointingException(_msg)
+[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank7]: Traceback (most recent call last): (RANK 3)
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank7]:     local_data = map_fun()
+[rank7]:                  ^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank7]:     result = func(*args, **kwargs)
+[rank7]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank7]:     local_plan = planner.create_local_plan()
+[rank7]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank7]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank7]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank7]:     raise CheckpointingException(_msg)
+[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+[rank7]: Traceback (most recent call last): (RANK 4)
+[rank7]:   File
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 5) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 6) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 7) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight + +[rank6]: Traceback (most recent call last): +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank6]: pretrain( +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank6]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank6]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank6]: ^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank6]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank6]: return _load_global_dist_base_checkpoint( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank6]: state_dict = 
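Note: the failure above is a metadata-level shape check, not a corrupt read. The checkpoint's metadata records a global shape of (16384, 4096) for embedding.position_embeddings.weight, while the relaunched model builds a (2048, 4096) table; for learned absolute position embeddings the first dimension is the maximum number of positions, so the saved run and this run disagree on context length. A minimal sketch of inspecting the shapes recorded in a torch.distributed.checkpoint (DCP) directory before attempting a load follows; the checkpoint path is a placeholder, not taken from this log.

    from torch.distributed.checkpoint import FileSystemReader

    def checkpoint_global_shapes(ckpt_dir):
        """Return {tensor_key: global_shape} from DCP metadata (reads no tensor data)."""
        metadata = FileSystemReader(ckpt_dir).read_metadata()
        return {
            key: tuple(md.size)
            for key, md in metadata.state_dict_metadata.items()
            if hasattr(md, "size")  # keep TensorStorageMetadata entries, skip bytes entries
        }

    shapes = checkpoint_global_shapes("/path/to/checkpoints/iter_0000000")  # placeholder path
    print(shapes.get("embedding.position_embeddings.weight"))
    # For this checkpoint the recorded shape would print as (16384, 4096),
    # reproducing the mismatch reported above without launching a full job.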
+[rank6]: Traceback (most recent call last):
+[rank6]: [... identical call stack to rank7 above (pretrain -> setup_model_and_optimizer -> load_checkpoint -> dist_checkpointing.load -> CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])), with the same per-rank traceback repeated for (RANK 0) through (RANK 7) ...]
+[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+
+[rank3]: Traceback (most recent call last):
+[rank3]: [... identical call stack to rank7 above, with the same per-rank traceback repeated for (RANK 0) through (RANK 7) ...]
+[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+
+[rank4]: Traceback (most recent call last):
+[rank4]: [... identical call stack to rank7 above, with the same per-rank traceback repeated for (RANK 0) through (RANK 7) ...]
+[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight
+
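Note: both sides of the mismatch share the hidden size (4096) and differ only in the position dimension, which is exactly what a restart configured for a shorter context would produce; in Megatron-LM terms this usually means the relaunch's --seq-length/--max-position-embeddings (assumed flag names, not visible in this log) no longer match the values the checkpoint was saved with. The decomposition, with the shapes copied verbatim from the exception text:

    # Shapes taken from the CheckpointingException message above.
    loaded_shape = (16384, 4096)    # global shape recorded in the checkpoint metadata
    expected_shape = (2048, 4096)   # global shape the current run builds

    (pos_ckpt, hidden_ckpt), (pos_run, hidden_run) = loaded_shape, expected_shape
    assert hidden_ckpt == hidden_run   # hidden size agrees across the two runs
    assert pos_ckpt != pos_run         # only the max-positions dimension differs
    print(f"checkpoint max positions: {pos_ckpt}, current run: {pos_run}")
    # -> checkpoint max positions: 16384, current run: 2048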
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight + +[rank0]: Traceback (most recent call last): +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank0]: pretrain( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank0]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank0]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank0]: ^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank0]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank0]: return _load_global_dist_base_checkpoint( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank0]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank0]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank0]: checkpoint.load_state_dict( +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank0]: return arg(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank0]: return _load_state_dict( +[rank0]: ^^^^^^^^^^^^^^^^^ +[rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank0]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank0]: raise result +[rank0]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank0]: Traceback (most recent call last): (RANK 0) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 1) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key 
embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 2) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 3) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 4) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 5) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 6) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", 
line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 7) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight + +[rank2]: Traceback (most recent call last): +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank2]: pretrain( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank2]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank2]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank2]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank2]: return _load_global_dist_base_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank2]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank2]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank2]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank2]: checkpoint.load_state_dict( +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank2]: return arg(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank2]: return _load_state_dict( +[rank2]: ^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank2]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank2]: raise result +[rank2]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank2]: Traceback (most recent call last): (RANK 0) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 1) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step 
+[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 2) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 3) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape 
mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 4) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 5) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 6) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: 
^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 7) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([16384, 4096])) and expected ((2048, 4096)) tensor for key embedding.position_embeddings.weight + +[rank3]:[W621 21:21:31.881223996 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 21:21:31.885342964 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 21:21:31.906656872 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 21:21:31.911729053 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
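The mismatch is geometric rather than a corruption: the checkpoint under gpt-checkpoint stores a position-embedding table of global shape (16384, 4096), apparently written by an earlier run with a 16384-token context, while the current launch builds the model expecting (2048, 4096). Since the sweep reuses the same --save/--load directory for every context length, each new geometry trips over the previous run's checkpoint. One way to confirm what a distributed checkpoint actually holds is to read its metadata before launching; a minimal sketch, assuming the checkpoint lives at gpt-checkpoint/iter_0000010 (the directory name here is a guess, not taken from the log):

    from torch.distributed.checkpoint import FileSystemReader
    from torch.distributed.checkpoint.metadata import TensorStorageMetadata

    # Read only the checkpoint metadata; no tensor data is loaded.
    metadata = FileSystemReader("gpt-checkpoint/iter_0000010").read_metadata()
    entry = metadata.state_dict_metadata.get("embedding.position_embeddings.weight")
    if isinstance(entry, TensorStorageMetadata):
        print(entry.size)  # torch.Size([16384, 4096]) would confirm a stale long-context checkpoint

If each context length is meant to train from scratch, pointing --save/--load at a per-length directory (or clearing gpt-checkpoint between sweep steps) avoids the collision.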
+[rank3]:[W621 21:21:31.881223996 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
[the same warning follows for ranks 4, 6, 1, 7, 5, 0, and 2]
+W0621 21:21:31.831000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513555 closing signal SIGTERM
+W0621 21:21:31.832000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513556 closing signal SIGTERM
+W0621 21:21:31.833000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513557 closing signal SIGTERM
+W0621 21:21:31.833000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513558 closing signal SIGTERM
+W0621 21:21:31.834000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513559 closing signal SIGTERM
+W0621 21:21:31.834000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513560 closing signal SIGTERM
+W0621 21:21:31.835000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1513561 closing signal SIGTERM
+E0621 21:21:32.577000 1513484 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 1513562) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time      : 2025-06-21_21:21:31
+  host      : fs-mbz-gpu-661
+  rank      : 7 (local_rank: 7)
+  exitcode  : 1 (pid: 1513562)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=4096
++ PROF_CTX_LENGTH=4096
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=4096, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:21:35.451000 1515366 site-packages/torch/distributed/run.py:766]
+W0621 21:21:35.451000 1515366 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:21:35.451000 1515366 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:21:35.451000 1515366 site-packages/torch/distributed/run.py:766] *****************************************
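One detail worth flagging in the xtrace above: the skip-if-exists guard compares a quoted pattern literally. '[' -f '...mytrace.L4096*tp8.cp1.bs1.json' ']' tests for a file whose name contains a literal '*', so the guard can never match and every context length is rerun even when a matching trace already exists. The intended check has to expand the glob first; a hypothetical re-implementation (Python used here for illustration; the actual driver is a shell script that is not shown):

    import glob

    # Expand the pattern instead of testing it as a literal filename.
    pattern = "/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp8.cp1.bs1.json"
    already_profiled = bool(glob.glob(pattern))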
+[rank5]:[W621 21:21:56.691480306 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
[the same warning follows for Ranks 2, 0, 7, 6, 1, 3, and 4]
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
[the warning above is emitted once per rank, eight times in total]
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
[the warning above is emitted once per rank, eight times in total]
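The two deprecation warnings above repeat once per rank on every relaunch. If they are pure noise in these profiling logs, they can be filtered at startup; a sketch that silences only these messages (it does not address the underlying deprecations, and the fp8 argument should still be migrated as the UserWarning requests):

    import warnings

    # Match the exact messages seen above.
    warnings.filterwarnings("ignore", message=r'The fp8 argument in "get_gpt_layer_with_transformer_engine_spec".*', category=UserWarning)
    warnings.filterwarnings("ignore", message=r"Offloading weights is deprecated.*", category=DeprecationWarning)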
+[rank0]: Traceback (most recent call last):
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank0]:     pretrain(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
+[rank0]:     save_checkpoint(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 443, in save_checkpoint
+[rank0]:     ensure_directory_exists(checkpoint_name, check_parent=False)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 126, in ensure_directory_exists
+[rank0]:     os.makedirs(dirname, exist_ok=True)
+[rank0]:   File "<frozen os>", line 225, in makedirs
+[rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010'
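This failure is different from the shape mismatch: os.makedirs(dirname, exist_ok=True) creates missing parents, so an [Errno 2] on the relative path 'gpt-checkpoint/iter_0000010' usually points at the working directory or a parent component disappearing underneath the process, which is plausible on a shared filesystem while another step prunes gpt-checkpoint. If the cause is a transient race, one defensive option is a retry wrapper; a hypothetical hardening of ensure_directory_exists, not the code Megatron ships:

    import os
    import time

    def makedirs_with_retry(path: str, attempts: int = 3, delay: float = 1.0) -> None:
        # Retry transient FileNotFoundError races sometimes seen on shared filesystems.
        for attempt in range(attempts):
            try:
                os.makedirs(path, exist_ok=True)
                return
            except FileNotFoundError:
                if attempt == attempts - 1:
                    raise
                time.sleep(delay)

If instead the directory is being removed deliberately between sweep steps, saving to an absolute per-step path sidesteps the race entirely.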
+[rank0]:[W621 21:22:44.741471806 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+W0621 21:22:47.763000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515438 closing signal SIGTERM
+W0621 21:22:47.766000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515439 closing signal SIGTERM
+W0621 21:22:47.770000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515440 closing signal SIGTERM
+W0621 21:22:47.772000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515441 closing signal SIGTERM
+W0621 21:22:47.774000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515442 closing signal SIGTERM
+W0621 21:22:47.775000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515443 closing signal SIGTERM
+W0621 21:22:47.778000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1515444 closing signal SIGTERM
+E0621 21:22:49.242000 1515366 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 1515437) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time      : 2025-06-21_21:22:47
+  host      : fs-mbz-gpu-661
+  rank      : 0 (local_rank: 0)
+  exitcode  : 1 (pid: 1515437)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=8192
++ PROF_CTX_LENGTH=8192
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=8192, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:22:52.604000 1517667 site-packages/torch/distributed/run.py:766]
+W0621 21:22:52.604000 1517667 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:22:52.604000 1517667 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:22:52.604000 1517667 site-packages/torch/distributed/run.py:766] *****************************************
+[rank5]:[W621 21:23:13.267768461 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
[the same warning follows for Ranks 1, 7, 0, 3, 4, 2, and 6]
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+  warnings.warn(
[the warning above is emitted once per rank, eight times in total]
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+  warnings.warn(
[the warning above is emitted once per rank, eight times in total]
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+  checkpoint.load_state_dict(
[the warning above is emitted once per rank, eight times in total]
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+  device = getattr(value, "device", None)
[the warning above is emitted once per rank, eight times in total]
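The `load_state_dict` FutureWarning points at megatron/core/dist_checkpointing/strategies/torch.py:915, which still calls the deprecated torch.distributed.checkpoint.load_state_dict entry point; the replacement the warning asks for is torch.distributed.checkpoint.load. A minimal sketch of the migration (the state dict and checkpoint path below are placeholders, not the values Megatron actually passes):

    import torch
    import torch.distributed.checkpoint as dcp
    from torch.distributed.checkpoint import FileSystemReader

    # Deprecated form: dcp.load_state_dict(state_dict=sd, storage_reader=reader)
    # Replacement:     dcp.load(sd, storage_reader=reader)
    sd = {"embedding.position_embeddings.weight": torch.empty(8192, 4096)}
    dcp.load(sd, storage_reader=FileSystemReader("gpt-checkpoint/iter_0000010"))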
+[rank0]: Traceback (most recent call last):
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank0]:     pretrain(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
+[rank0]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
+[rank0]:                                             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
+[rank0]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
+[rank0]:                                                                 ^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
+[rank0]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
+[rank0]:                                                       ^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
+[rank0]:     return _load_global_dist_base_checkpoint(
+[rank0]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
+[rank0]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
+[rank0]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
+[rank0]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
+[rank0]:                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
+[rank0]:     checkpoint.load_state_dict(
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[rank0]:     return arg(*args, **kwargs)
+[rank0]:            ^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
+[rank0]:     return _load_state_dict(
+[rank0]:            ^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
+[rank0]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
+[rank0]:                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank0]:     raise result
+[rank0]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank0]: Traceback (most recent call last): (RANK 0)
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank0]:     local_data = map_fun()
+[rank0]:                  ^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank0]:     result = func(*args, **kwargs)
+[rank0]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank0]:     local_plan = planner.create_local_plan()
+[rank0]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank0]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes
+[rank0]:     raise KeyError(
+[rank0]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']"
+[rank0]: [the per-rank tracebacks (RANK 1) through (RANK 7) repeat the (RANK 0) traceback and KeyError above verbatim]
+
+[rank6]: Traceback (most recent call last):
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank6]:     pretrain(
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
+[rank6]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
+[rank6]:                                             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
+[rank6]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
+[rank6]:                                                                 ^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
+[rank6]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
+[rank6]:                                                       ^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
+[rank6]:     return _load_global_dist_base_checkpoint(
+[rank6]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
+[rank6]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
+[rank6]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
+[rank6]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
+[rank6]:                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
+[rank6]:     checkpoint.load_state_dict(
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[rank6]:     return arg(*args, **kwargs)
+[rank6]:            ^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
+[rank6]:     return _load_state_dict(
+[rank6]:            ^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
+[rank6]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
+[rank6]:                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank6]:     raise result
+[rank6]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank6]: [the per-rank tracebacks (RANK 0) through (RANK 3) repeat the (RANK 0) traceback and KeyError reported by [rank0] above verbatim]
+[rank6]: Traceback (most recent call last): (RANK 4)
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank6]:     local_data = map_fun()
+[rank6]:                  ^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank6]:     result = func(*args, **kwargs)
+[rank6]:              ^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank6]:     local_plan = planner.create_local_plan()
+[rank6]:                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank6]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank6]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes
+[rank6]:     raise KeyError(
+[rank6]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight',
'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank6]: Traceback (most recent call last): (RANK 6) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank6]: raise KeyError( +[rank6]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank6]: Traceback (most recent call last): (RANK 7) +[rank6]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank6]: raise KeyError( +[rank6]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 
'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" + +[rank1]: Traceback (most recent call last): +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank1]: pretrain( +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank1]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank1]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank1]: ^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank1]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank1]: return _load_global_dist_base_checkpoint( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank1]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank1]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank1]: checkpoint.load_state_dict( +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank1]: return arg(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank1]: return _load_state_dict( +[rank1]: ^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank1]: 
+[rank1]: Traceback (most recent call last):
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank1]:     pretrain(
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain
+[rank1]:     model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer
+[rank1]:     args.iteration, args.num_floating_point_operations_so_far = load_checkpoint(
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint
+[rank1]:     state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint(
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint
+[rank1]:     return _load_global_dist_base_checkpoint(
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint
+[rank1]:     state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load
+[rank1]:     loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load
+[rank1]:     checkpoint.load_state_dict(
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+[rank1]:     return arg(*args, **kwargs)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict
+[rank1]:     return _load_state_dict(
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict
+[rank1]:     central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank1]:     raise result
+[rank1]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank1]: Traceback (most recent call last): (RANK 0)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]:     local_data = map_fun()
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]:     result = func(*args, **kwargs)
+[rank1]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]:     local_plan = planner.create_local_plan()
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]:     self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes
+[rank1]:     raise KeyError(
+[rank1]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: [... same key list as above; omitted ...]"
+[rank1]: [... the identical "(RANK n)" traceback and KeyError repeat verbatim for (RANK 1) through (RANK 6); duplicate key lists omitted. In the raw log, rank1's (RANK 5) and (RANK 6) output is interleaved mid-line with the tracebacks from ranks 2, 4, 5 and 7 below ...]
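[note] Every rank fails at the same place: create_local_plan validates that each sharded tensor requested by the model/optimizer has a matching key in the checkpoint metadata. A simplified sketch of that check (assumed, not Megatron's verbatim code from megatron/core/dist_checkpointing/strategies/torch.py), reproducing the error message format seen above:

    def validate_keys(expected_keys, metadata_keys):
        # expected_keys: keys the sharded state dict wants to load,
        # e.g. 'optimizer.state.exp_avg.embedding.position_embeddings.weight'
        # metadata_keys: keys recorded in the checkpoint metadata
        for key in expected_keys:
            if key not in metadata_keys:
                raise KeyError(
                    f"{key} from model not in state dict: {sorted(metadata_keys)}"
                )

Since the checkpoint holds fp32_param.* main params but no exp_avg.*/exp_avg_sq.* moments, skipping optimizer restoration (Megatron's --no-load-optim flag) or loading with a laxer --dist-ckpt-strictness setting (the run already threads strict=args.dist_ckpt_strictness through the load path above) are plausible workarounds; whether either is acceptable for this experiment is an assumption, not something this log establishes.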
'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.b[rank2]: Traceback (most recent call last): +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank2]: pretrain( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank2]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank2]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank2]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank2]: return _load_global_dist_base_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank2]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank2]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank2]: checkpoint.load_state_dict( +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank2]: return arg(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank2]: return _load_state_dict( +[rank2]: ^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank2]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank2]: raise result +[rank2]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank2]: Traceback (most recent call last): (RANK 0) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs[rank4]: Traceback (most recent call last): +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank4]: pretrain( +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank4]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank4]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank4]: ^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank4]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank4]: return _load_global_dist_base_checkpoint( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank4]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank4]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank4]: checkpoint.load_state_dict( +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank4]: return arg(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank4]: return _load_state_dict( +[rank4]: ^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 
234, in _load_state_dict +[rank4]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank4]: raise result +[rank4]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank4]: Traceback (most recent call last): (RANK 0) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs[rank5]: Traceback (most recent call last): +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank5]: pretrain( +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank5]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank5]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank5]: ^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank5]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank5]: return _load_global_dist_base_checkpoint( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank5]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank5]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank5]: checkpoint.load_state_dict( +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank5]: return arg(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank5]: return _load_state_dict( 
+[rank5]: ^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank5]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank5]: raise result +[rank5]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank5]: Traceback (most recent call last): (RANK 0) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envsias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank1]: Traceback (most recent call last): (RANK 6) +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank1]: local_data = map_fun() +[rank1]: ^^^^^^^^^ +[rank1]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank1]: result = func(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank1]: local_plan = planner.create_local_plan() +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank1]: raise KeyError( +[rank1]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_s[rank7]: Traceback (most recent call last): +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank7]: pretrain( +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank7]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank7]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank7]: ^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank7]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank7]: return _load_global_dist_base_checkpoint( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank7]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank7]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank7]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank7]: checkpoint.load_state_dict( +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank7]: return arg(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank7]: return _load_state_dict( +[rank7]: ^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank7]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank7]: raise result +[rank7]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank7]: Traceback (most recent call last): (RANK 0) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank2]: raise KeyError( +[rank2]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 
'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank2]: Traceback (most recent call last): (RANK 1) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 
'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank4]: Traceback (most recent call last): (RANK 1) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 1) +[rank5]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpointtate/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank1]: Traceback (most recent call last): (RANK 7) +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank1]: local_data = map_fun() +[rank1]: ^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank1]: result = func(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank1]: local_plan = planner.create_local_plan() +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank1]: self._validate_global_shapes(self.metadata, 
self.shapes_validation_sharded_tensors) +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank1]: raise KeyError( +[rank1]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.lin/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( +[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 
'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank7]: Traceback (most recent call last): (RANK 1) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 
'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machi/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 
'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machi/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( +[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 
'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank4]: Traceback (most recent call last): (RANK 2) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.ne_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 2) 
+[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 
'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.ne_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank7]: Traceback (most recent call last): (RANK 2) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( +[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 
'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.[rank3]: Traceback (most recent call last): +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank3]: pretrain( +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank3]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank3]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank3]: ^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank3]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank3]: return _load_global_dist_base_checkpoint( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank3]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank3]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank3]: checkpoint.load_state_dict( +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank3]: return arg(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank3]: return _load_state_dict( +[rank3]: ^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank3]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank3]: raise result +[rank3]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank3]: Traceback (most recent call last): (RANK 0) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envsfp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 3) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 
'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bfp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank7]: Traceback (most recent call last): (RANK 3) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( +[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 
'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 4) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1ear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 
'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" + +ias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank7]: Traceback (most recent call last): (RANK 4) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan 
= planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( +[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank2]: raise KeyError( +[rank2]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 
'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machifp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank4]: Traceback (most recent call last): (RANK 3) +[rank4]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.b/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank3]: raise KeyError( +[rank3]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 
'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank3]: Traceback (most recent call last): (RANK 1) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 5) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 
'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.b.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank7]: Traceback (most recent call last): (RANK 5) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( 
+[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank4]: Traceback (most recent call last): (RANK 4) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in 
_validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1ias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 
'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 6) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_s.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 
'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank4]: Traceback (most recent call last): (RANK 5) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.btate/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 
'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank5]: Traceback (most recent call last): (RANK 7) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank5]: raise KeyError( +[rank5]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 
'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank7]: Traceback (most recent call last): (RANK 6) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank7]: raise KeyError( +[rank7]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 
'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_sias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank4]: Traceback (most recent call last): (RANK 6) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank4]: raise KeyError( +[rank4]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 
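The raise itself comes from _validate_global_shapes (megatron/core/dist_checkpointing/strategies/torch.py, line 558), which checks every sharded tensor the model wants to load against the checkpoint metadata before building the local load plan. A minimal sketch of that membership check, simplified for illustration rather than Megatron's actual implementation:

    def validate_keys(metadata_keys, requested_keys):
        # Any requested key absent from the checkpoint metadata aborts
        # planning with the KeyError seen in the tracebacks above.
        for key in requested_keys:
            if key not in metadata_keys:
                raise KeyError(f"{key} from model not in state dict: {sorted(metadata_keys)}")

    # The checkpoint stores only fp32_param optimizer entries, so the first
    # exp_avg lookup fails during create_local_plan() on every rank.
    try:
        validate_keys(
            metadata_keys={"optimizer.state.fp32_param.embedding.word_embeddings.weight"},
            requested_keys=["optimizer.state.exp_avg.embedding.position_embeddings.weight"],
        )
    except KeyError as err:
        print(err)

Ranks 2, 3, 4, and 7 raise this identical error for each of their shard plans (RANK 2 through RANK 7), which points to a save-time versus load-time optimizer configuration mismatch rather than a corrupt individual shard.
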
['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank2]: Traceback (most recent call last): (RANK 6) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank2]: raise KeyError( +[rank2]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 
'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" +[rank2]: Traceback (most recent call last): (RANK 7) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 558, in _validate_global_shapes +[rank2]: raise KeyError( +[rank2]: KeyError: "optimizer.state.exp_avg.embedding.position_embeddings.weight from model not in state dict: ['decoder.final_layernorm._extra_state/shard_0_1', 'decoder.final_layernorm.bias', 'decoder.final_layernorm.weight', 'decoder.layers.mlp.linear_fc1._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc1._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc1.bias', 'decoder.layers.mlp.linear_fc1.layer_norm_bias', 'decoder.layers.mlp.linear_fc1.layer_norm_weight', 'decoder.layers.mlp.linear_fc1.weight', 'decoder.layers.mlp.linear_fc2._extra_state/shard_0_2', 'decoder.layers.mlp.linear_fc2._extra_state/shard_1_2', 'decoder.layers.mlp.linear_fc2.bias', 'decoder.layers.mlp.linear_fc2.weight', 'decoder.layers.self_attention.core_attention._extra_state/shard_0_2', 'decoder.layers.self_attention.core_attention._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_proj._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_proj.bias', 'decoder.layers.self_attention.linear_proj.weight', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_0_2', 'decoder.layers.self_attention.linear_qkv._extra_state/shard_1_2', 'decoder.layers.self_attention.linear_qkv.bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'decoder.layers.self_attention.linear_qkv.weight', 'embedding.position_embeddings.weight', 'embedding.word_embeddings.weight', 'optimizer.state.fp32_param.decoder.final_layernorm.bias', 'optimizer.state.fp32_param.decoder.final_layernorm.weight', 
'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc1.weight', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.bias', 'optimizer.state.fp32_param.decoder.layers.mlp.linear_fc2.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_proj.weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_bias', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.layer_norm_weight', 'optimizer.state.fp32_param.decoder.layers.self_attention.linear_qkv.weight', 'optimizer.state.fp32_param.embedding.position_embeddings.weight', 'optimizer.state.fp32_param.embedding.word_embeddings.weight', 'rerun_state_machine_state/shard_0.0_1.8', 'rerun_state_machine_state/shard_0.1_1.8', 'rerun_state_machine_state/shard_0.2_1.8', 'rerun_state_machine_state/shard_0.3_1.8', 'rerun_state_machine_state/shard_0.4_1.8', 'rerun_state_machine_state/shard_0.5_1.8', 'rerun_state_machine_state/shard_0.6_1.8', 'rerun_state_machine_state/shard_0.7_1.8', 'rng_state/shard_0.0_1.8', 'rng_state/shard_0.1_1.8', 'rng_state/shard_0.2_1.8', 'rng_state/shard_0.3_1.8', 'rng_state/shard_0.4_1.8', 'rng_state/shard_0.5_1.8', 'rng_state/shard_0.6_1.8', 'rng_state/shard_0.7_1.8']" + +[rank6]:[W621 21:23:30.612576839 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 21:23:30.635633826 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 21:23:30.708211991 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 21:23:30.715567195 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 21:23:30.738222815 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 21:23:30.746301887 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 21:23:30.748933352 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank0]:[W621 21:23:30.752013288 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+W0621 21:23:31.674000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517738 closing signal SIGTERM
+W0621 21:23:31.677000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517739 closing signal SIGTERM
+W0621 21:23:31.677000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517740 closing signal SIGTERM
+W0621 21:23:31.677000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517742 closing signal SIGTERM
+W0621 21:23:31.678000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517743 closing signal SIGTERM
+W0621 21:23:31.678000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517744 closing signal SIGTERM
+W0621 21:23:31.678000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1517745 closing signal SIGTERM
+E0621 21:23:32.307000 1517667 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 3 (pid: 1517741) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time : 2025-06-21_21:23:31
+  host : fs-mbz-gpu-661
+  rank : 3 (local_rank: 3)
+  exitcode : 1 (pid: 1517741)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=12288
++ PROF_CTX_LENGTH=12288
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=12288, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:23:35.227000 1519549 site-packages/torch/distributed/run.py:766]
+W0621 21:23:35.227000 1519549 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:23:35.227000 1519549 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:23:35.227000 1519549 site-packages/torch/distributed/run.py:766] *****************************************
+[rank0]:[W621 21:23:56.500176980 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank6]:[W621 21:23:56.648171893 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank7]:[W621 21:23:56.670895937 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
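The repeated KeyError above shows every rank's load plan asking for optimizer.state.exp_avg.* (Adam first-moment) keys while the checkpoint metadata on disk only contains parameter, fp32_param, rng, and rerun-state entries; in other words, the checkpoint being loaded was written without the exp_avg/exp_avg_sq optimizer state the resuming job expects. A quick way to confirm what a DCP-format checkpoint actually contains is to read its .metadata file directly. A minimal diagnostic sketch (the iter_* path below is hypothetical; substitute the directory the failing run tried to load):

from torch.distributed.checkpoint import FileSystemReader

# Point at the checkpoint directory that holds the .metadata file.
reader = FileSystemReader("gpt-checkpoint/iter_0000008")
metadata = reader.read_metadata()

keys = sorted(metadata.state_dict_metadata)
print(len(keys), "keys in checkpoint metadata")
exp_avg = [k for k in keys if k.startswith("optimizer.state.exp_avg")]
print(exp_avg if exp_avg else "no exp_avg keys saved")

The FutureWarning printed at each relaunch is a separate issue: torch.distributed.launch is deprecated in favor of torchrun, which sets LOCAL_RANK in the environment instead of passing --local-rank.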
+[rank2]:[W621 21:23:56.673095341 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 21:23:56.675900591 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 21:23:56.677130224 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 21:23:56.677756179 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 21:23:56.686646424 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +[rank4]:[W621 21:24:23.664721649 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 21:24:23.751421365 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 21:24:23.765434674 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 21:24:23.775773006 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 21:24:23.806865651 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank0]:[W621 21:24:23.829161531 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 21:24:23.847641905 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 21:24:24.013896332 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=16384 ++ PROF_CTX_LENGTH=16384 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp8.cp1.bs1.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp8.cp1.bs1.json' ']' ++ echo 'Running ctx_length=16384, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1' ++ srun bash ./attnserver.sh +rm: cannot remove 'gpt-checkpoint/': Directory not empty ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. 
See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 21:24:31.630000 1522298 site-packages/torch/distributed/run.py:766] +W0621 21:24:31.630000 1522298 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 21:24:31.630000 1522298 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 21:24:31.630000 1522298 site-packages/torch/distributed/run.py:766] ***************************************** +[rank0]:[W621 21:24:54.097694054 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 21:24:54.110341540 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 21:24:54.118763873 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 21:24:54.118981897 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 21:24:54.121897714 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 21:24:54.123459050 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 21:24:54.123665043 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 21:24:54.126362506 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +[rank2]:[W621 21:25:23.263602686 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 21:25:23.392490449 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 21:25:23.445456650 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 21:25:23.452715787 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 21:25:23.497915927 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 21:25:23.507624183 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank0]:[W621 21:25:23.526746650 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 21:25:23.556670152 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=24576 ++ PROF_CTX_LENGTH=24576 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L24576*tp8.cp1.bs1.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L24576*tp8.cp1.bs1.json' ']' ++ echo 'Running ctx_length=24576, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 24576 --max-position-embeddings 24576 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 21:25:31.479000 1525169 site-packages/torch/distributed/run.py:766] +W0621 21:25:31.479000 1525169 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 21:25:31.479000 1525169 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 21:25:31.479000 1525169 site-packages/torch/distributed/run.py:766] ***************************************** +[rank6]:[W621 21:25:53.442702770 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 21:25:53.446379678 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 21:25:53.447676047 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
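Each restart also re-emits the ProcessGroupNCCL warning above because init_process_group() runs before the rank's CUDA device is fixed. In recent PyTorch releases the warning's own suggestion looks like the sketch below (a sketch only; the exact integration point in pretrain_gpt_profile.py is not visible in this log):

import os
import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun / torch.distributed.launch
torch.cuda.set_device(local_rank)
# Passing device_id pins the rank-to-GPU mapping up front, which both
# silences the warning and avoids the potential hang it describes.
dist.init_process_group(backend="nccl", device_id=torch.device(f"cuda:{local_rank}"))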
+[rank7]:[W621 21:25:53.450901607 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 21:25:53.453957589 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 21:25:53.454020206 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 21:25:53.454560517 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 21:25:53.461840665 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
+ warnings.warn(
+[rank0]: Traceback (most recent call last):
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank0]:     pretrain(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
+[rank0]:     save_checkpoint(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
+[rank0]:     async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
+[rank0]:                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
+[rank0]:     sharded_strategy.save(sharded_state_dict, checkpoint_dir)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
+[rank0]:     return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
+[rank0]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
+[rank0]:     async_calls.maybe_finalize_async_calls(blocking=True)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
+[rank0]:     finalize_fn()
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
+[rank0]:     save_state_dict_async_finalize(*save_state_dict_ret)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 243, in save_state_dict_async_finalize
+[rank0]:     storage_writer.finish(global_metadata, all_results)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 483, in finish
+[rank0]:     super().finish(metadata, results)
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 697, in finish
+[rank0]:     with self.fs.create_stream(tmp_path, "wb") as metadata_file:
+[rank0]:          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/contextlib.py", line 137, in __enter__
+[rank0]:     return next(self.gen)
+[rank0]:            ^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 476, in create_stream
+[rank0]:     with path.open(mode) as stream:
+[rank0]:          ^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/pathlib.py", line 1013, in open
+[rank0]:     return io.open(self, mode, buffering, encoding, errors, newline)
+[rank0]:            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/.metadata.tmp'
+[rank0]:[W621 21:27:07.359814328 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+W0621 21:27:13.172000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525241 closing signal SIGTERM
+W0621 21:27:13.175000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525242 closing signal SIGTERM
+W0621 21:27:13.178000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525243 closing signal SIGTERM
+W0621 21:27:13.181000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525244 closing signal SIGTERM
+W0621 21:27:13.185000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525245 closing signal SIGTERM
+W0621 21:27:13.196000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525246 closing signal SIGTERM
+W0621 21:27:13.200000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1525247 closing signal SIGTERM
+E0621 21:27:14.808000 1525169 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 1525240) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time : 2025-06-21_21:27:13
+  host : fs-mbz-gpu-661
+  rank : 0 (local_rank: 0)
+  exitcode : 1 (pid: 1525240)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=32768
++ PROF_CTX_LENGTH=32768
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=32768, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 32768 --max-position-embeddings 32768 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:27:18.180000 1527697 site-packages/torch/distributed/run.py:766]
+W0621 21:27:18.180000 1527697 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:27:18.180000 1527697 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 21:27:18.180000 1527697 site-packages/torch/distributed/run.py:766] *****************************************
+[rank6]:[W621 21:27:39.913669667 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank0]:[W621 21:27:39.371183485 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank1]:[W621 21:27:39.381606243 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank4]:[W621 21:27:39.383112159 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
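The FileNotFoundError on 'gpt-checkpoint/iter_0000010/.metadata.tmp' above, taken together with the earlier "rm: cannot remove 'gpt-checkpoint/': Directory not empty", is consistent with the sweep script deleting and recreating gpt-checkpoint/ while the previous run's async save was still finalizing: finish() then tries to create .metadata.tmp inside a directory that no longer exists. A defensive guard (a sketch with a hypothetical helper, not Megatron code) would recreate the iteration directory just before the metadata write:

import os

def ensure_checkpoint_dir(path: str) -> None:
    # Recreate the iteration directory if an external cleanup removed it,
    # so writing .metadata.tmp cannot fail with FileNotFoundError.
    os.makedirs(path, exist_ok=True)

ensure_checkpoint_dir("gpt-checkpoint/iter_0000010")

The cleaner fix is on the script side: avoid removing the --save directory between context-length runs until the prior job's checkpoint writes have finished.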
+[rank6]:[W621 21:27:39.913669667 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank0]:[W621 21:27:39.371183485 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank1]:[W621 21:27:39.381606243 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank4]:[W621 21:27:39.383112159 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank7]:[W621 21:27:39.384449116 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank2]:[W621 21:27:39.385875531 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank3]:[W621 21:27:39.385969860 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank5]:[W621 21:27:39.392556727 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
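All eight ranks emit the same warning: the process group does not yet know which GPU each rank owns, and the fix the message suggests is passing device_id to init_process_group(). A minimal sketch, assuming the one-process-per-GPU layout shown in this log:

    # Sketch: pin each rank to its GPU so NCCL knows the mapping up front.
    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    dist.init_process_group(backend="nccl", device_id=device)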
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
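Both deprecations repeat once per rank (eight copies each) and carry no new information after the first occurrence. Until the call sites are updated as the warnings request, a narrowly targeted filter keeps the log readable; the message patterns below are copied from the warnings themselves:

    # Sketch: suppress the two known-noisy deprecations, nothing else.
    import warnings

    warnings.filterwarnings(
        "ignore",
        message=r'The fp8 argument in "get_gpt_layer_with_transformer_engine_spec"',
    )
    warnings.filterwarnings("ignore", message=r"Offloading weights is deprecated")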
+[rank0]: Traceback (most recent call last):
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in <module>
+[rank0]:     pretrain(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
+[rank0]:     save_checkpoint(
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
+[rank0]:     async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
+[rank0]:                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 386, in save
+[rank0]:     common_strategy.save_common(state_dict, checkpoint_dir)
+[rank0]:   File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/common.py", line 48, in save_common
+[rank0]:     torch.save(common_state_dict, path)
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 964, in save
+[rank0]:     with _open_zipfile_writer(f) as opened_zipfile:
+[rank0]:          ^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 828, in _open_zipfile_writer
+[rank0]:     return container(name_or_buffer)
+[rank0]:            ^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]:   File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/serialization.py", line 792, in __init__
+[rank0]:     torch._C.PyTorchFileWriter(
+[rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
+[rank0]:[W621 21:28:58.510459455 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
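The traceback shows torch.save() aborting because the iteration directory gpt-checkpoint/iter_0000010 was never created before the common state dict was written. A minimal sketch of the guard (save_common_safely is a hypothetical wrapper, not Megatron's own API):

    # Sketch: create the iteration directory before any rank writes into it.
    import os
    import torch

    def save_common_safely(common_state_dict, path):
        # e.g. path = "gpt-checkpoint/iter_0000010/common.pt" (assumed layout)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        torch.save(common_state_dict, path)

    # With several ranks saving under the same root, have rank 0 create the
    # directory and make the others wait on a dist.barrier() before writing.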
+W0621 21:29:03.575000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527767 closing signal SIGTERM
+W0621 21:29:03.578000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527768 closing signal SIGTERM
+W0621 21:29:03.579000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527769 closing signal SIGTERM
+W0621 21:29:03.582000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527770 closing signal SIGTERM
+W0621 21:29:03.597000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527771 closing signal SIGTERM
+W0621 21:29:03.601000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527772 closing signal SIGTERM
+W0621 21:29:03.605000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 1527773 closing signal SIGTERM
+E0621 21:29:04.954000 1527697 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 1527766) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Traceback (most recent call last):
+  File "<frozen runpy>", line 198, in _run_module_as_main
+  File "<frozen runpy>", line 88, in _run_code
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+    main()
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+    return arg(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+    launch(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+    run(args)
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+    elastic_launch(
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+    return launch_agent(self._config, self._entrypoint, list(args))
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
+    raise ChildFailedError(
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+============================================================
+./pretrain_gpt_profile.py FAILED
+------------------------------------------------------------
+Failures:
+  <NO_OTHER_FAILURES>
+------------------------------------------------------------
+Root Cause (first observed failure):
+[0]:
+  time      : 2025-06-21_21:29:03
+  host      : fs-mbz-gpu-661
+  rank      : 0 (local_rank: 0)
+  exitcode  : 1 (pid: 1527766)
+  error_file: <N/A>
+  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
+============================================================
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=40960
++ PROF_CTX_LENGTH=40960
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L40960*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L40960*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=40960, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 40960 --max-position-embeddings 40960 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in the future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:29:08.114000 1530121 site-packages/torch/distributed/run.py:766]
+W0621 21:29:08.114000 1530121 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:29:08.114000 1530121 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to 1 by default to avoid overloading your system; please further tune the variable for optimal performance in your application as needed.
+W0621 21:29:08.114000 1530121 site-packages/torch/distributed/run.py:766] *****************************************
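As on every launch, the runner pins OMP_NUM_THREADS to 1 per worker and asks the user to tune it. If CPU-side work (data loading, checkpoint serialization) turns out to be the bottleneck, a per-node budget can be set before OpenMP-using libraries initialize; the even split below is an assumption, not a measured setting:

    # Sketch: give each of the 8 workers an equal share of the node's cores.
    # Must run before torch/NumPy initialize OpenMP, so place it at the very
    # top of the entry script (or export it in the launch environment).
    import os

    workers_per_node = 8
    cores = os.cpu_count() or workers_per_node
    os.environ.setdefault("OMP_NUM_THREADS", str(max(1, cores // workers_per_node)))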
+[rank0]:[W621 21:29:30.915923901 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank7]:[W621 21:29:30.920826167 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank6]:[W621 21:29:30.922181895 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank4]:[W621 21:29:30.925484723 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank5]:[W621 21:29:30.925572308 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank1]:[W621 21:29:30.927545996 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank2]:[W621 21:29:30.932604993 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+[rank3]:[W621 21:29:30.940394882 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank-to-GPU mapping is incorrect. You can specify device_id in init_process_group() to force use of a particular device.
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+[rank2]:[W621 21:30:48.602944615 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank4]:[W621 21:30:48.655503221 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank6]:[W621 21:30:48.675048742 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank7]:[W621 21:30:48.687623260 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank5]:[W621 21:30:48.714145977 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
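Every rank in the 40960 run exits with the destroy_process_group() warning. The shutdown the warning asks for is a small addition at the end of the entrypoint; a minimal sketch (run_training is a placeholder name):

    # Sketch: tear down the process group explicitly so NCCL resources are
    # released and this warning goes away.
    import torch.distributed as dist

    try:
        run_training()  # placeholder for the actual training/profiling call
    finally:
        if dist.is_initialized():
            dist.destroy_process_group()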
+[rank3]:[W621 21:30:48.728168639 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank1]:[W621 21:30:48.821435705 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank0]:[W621 21:30:49.876637260 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=49152
++ PROF_CTX_LENGTH=49152
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L49152*tp8.cp1.bs1.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L49152*tp8.cp1.bs1.json' ']'
++ echo 'Running ctx_length=49152, TP_SIZE=8, CP_SIZE=1, BATCH_SIZE=1'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343207 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-661:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 1 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 49152 --max-position-embeddings 49152 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in the future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+  main()
+W0621 21:31:00.098000 1533063 site-packages/torch/distributed/run.py:766]
+W0621 21:31:00.098000 1533063 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 21:31:00.098000 1533063 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to 1 by default to avoid overloading your system; please further tune the variable for optimal performance in your application as needed.
+W0621 21:31:00.098000 1533063 site-packages/torch/distributed/run.py:766] *****************************************
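One script-level bug worth flagging in the trace above: the skip-if-trace-exists guard runs '[' -f with the asterisk inside single quotes, so it tests for a file literally named with a '*' and the guard effectively never skips a completed context length. A working check has to expand the pattern; a sketch in Python, with the pattern copied from the log:

    # Sketch: skip the run when a matching trace file already exists.
    import glob

    pattern = "/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L49152*tp8.cp1.bs1.json"
    if glob.glob(pattern):
        print("trace already exists, skipping ctx_length=49152")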